azure-ai-evaluation 1.0.0b5__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (61)
  1. azure/ai/evaluation/_common/_experimental.py +4 -0
  2. azure/ai/evaluation/_common/math.py +62 -2
  3. azure/ai/evaluation/_common/rai_service.py +80 -29
  4. azure/ai/evaluation/_common/utils.py +50 -16
  5. azure/ai/evaluation/_constants.py +1 -0
  6. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +9 -0
  7. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +13 -3
  8. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +11 -0
  9. azure/ai/evaluation/_evaluate/_eval_run.py +34 -10
  10. azure/ai/evaluation/_evaluate/_evaluate.py +59 -103
  11. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +2 -1
  12. azure/ai/evaluation/_evaluate/_utils.py +6 -4
  13. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +16 -17
  14. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +60 -29
  15. azure/ai/evaluation/_evaluators/_common/_base_eval.py +17 -5
  16. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +4 -2
  17. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +6 -9
  18. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +56 -50
  19. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +79 -34
  20. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +73 -34
  21. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +74 -33
  22. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +76 -34
  23. azure/ai/evaluation/_evaluators/_eci/_eci.py +28 -3
  24. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
  25. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +57 -26
  26. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +13 -15
  27. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +68 -30
  28. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +17 -20
  29. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +10 -8
  30. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +0 -2
  31. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +6 -2
  32. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +10 -6
  33. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +6 -2
  34. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +6 -2
  35. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +6 -2
  36. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +57 -34
  37. azure/ai/evaluation/_evaluators/_qa/_qa.py +25 -37
  38. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +63 -29
  39. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +76 -161
  40. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +24 -25
  41. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +65 -67
  42. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +26 -20
  43. azure/ai/evaluation/_evaluators/_xpia/xpia.py +74 -40
  44. azure/ai/evaluation/_exceptions.py +2 -0
  45. azure/ai/evaluation/_model_configurations.py +65 -14
  46. azure/ai/evaluation/_version.py +1 -1
  47. azure/ai/evaluation/simulator/_adversarial_scenario.py +15 -1
  48. azure/ai/evaluation/simulator/_adversarial_simulator.py +25 -34
  49. azure/ai/evaluation/simulator/_constants.py +11 -1
  50. azure/ai/evaluation/simulator/_direct_attack_simulator.py +16 -8
  51. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +11 -1
  52. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +3 -1
  53. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
  54. azure/ai/evaluation/simulator/_simulator.py +51 -45
  55. azure/ai/evaluation/simulator/_utils.py +25 -7
  56. {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/METADATA +232 -324
  57. {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/RECORD +60 -61
  58. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
  59. {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/NOTICE.txt +0 -0
  60. {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/WHEEL +0 -0
  61. {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.0.1.dist-info}/top_level.txt +0 -0
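Before the file-by-file diff below, note the most visible API change in this release: the multimodal safety evaluators now take their input through the `conversation` keyword, and the public `parallel` constructor argument has been replaced by `**kwargs`. The following sketch illustrates the new calling convention; it is based on the updated docstring examples in the diff, and the credential, project values, and message contents are placeholders rather than output of the package itself.

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ContentSafetyMultimodalEvaluator

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }

    # The public `parallel` argument is gone in 1.0.1; extra options go through **kwargs.
    eval_fn = ContentSafetyMultimodalEvaluator(
        credential=DefaultAzureCredential(),
        azure_ai_project=azure_ai_project,
    )

    # 1.0.1 requires the input to be passed as `conversation=` and supports a single turn only.
    # The message structure below is illustrative; see the package docstrings for the full schema.
    result = eval_fn(
        conversation={
            "messages": [
                {"role": "user", "content": "<user message, optionally with image content>"},
                {"role": "assistant", "content": "<assistant response>"},
            ]
        }
    )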
@@ -28,12 +28,10 @@ class ContentSafetyMultimodalEvaluator:
  :param azure_ai_project: The scope of the Azure AI project, containing the subscription ID,
  resource group, and project name.
  :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
- :param parallel: Specifies whether to use parallel execution for evaluators.
- If True, evaluators execute in parallel; otherwise, they execute sequentially. Defaults to True.
- :type parallel: bool
+ :param kwargs: Additional arguments to pass to the evaluator.
+ :type kwargs: Any

  :return: A function that evaluates multimodal chat messages and generates content safety metrics.
- :rtype: Callable

  **Usage Example**

@@ -45,7 +43,7 @@ class ContentSafetyMultimodalEvaluator:
  "project_name": "<project_name>",
  }
  eval_fn = ContentSafetyMultimodalEvaluator(azure_ai_project)
- result = eval_fn(
+ result = eval_fn(conversation=
  {
  "messages": [
  {
@@ -92,8 +90,11 @@ class ContentSafetyMultimodalEvaluator:

  """

- def __init__(self, credential, azure_ai_project, parallel: bool = False):
- self._parallel = parallel
+ id = "content_safety_multimodal"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+ def __init__(self, credential, azure_ai_project, **kwargs):
+ self._parallel = kwargs.pop("_parallel", False)
  self._evaluators: List[Callable[..., Dict[str, Union[str, float]]]] = [
  ViolenceMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
  SexualMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
@@ -104,8 +105,9 @@ class ContentSafetyMultimodalEvaluator:
  def __call__(self, *, conversation, **kwargs):
  """
  Evaluates content-safety metrics for list of messages.
+
  :keyword conversation: The conversation contains list of messages to be evaluated.
- Each message should have "role" and "content" keys.
+ Each message should have "role" and "content" keys. It supports single turn only.
  :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: The evaluation score based on the Content Safety Metrics.
  :rtype: Dict[str, Union[float, str]]
@@ -7,10 +7,8 @@ from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service_mu
  from azure.ai.evaluation._common.constants import EvaluationMetrics, _InternalEvaluationMetrics
  from azure.ai.evaluation._common.utils import validate_conversation
  from azure.core.credentials import TokenCredential
- from azure.ai.evaluation._common._experimental import experimental


- @experimental
  class ContentSafetyMultimodalEvaluatorBase(ABC):
  """
  Initialize a evaluator for a specified Evaluation Metric. Base class that is not
@@ -28,7 +28,7 @@ class HateUnfairnessMultimodalEvaluator:
  "project_name": "<project_name>",
  }
  eval_fn = HateUnfairnessMultimodalEvaluator(azure_ai_project)
- result = eval_fn(
+ result = eval_fn(conversation=
  {
  "messages": [
  {
@@ -66,14 +66,18 @@ class HateUnfairnessMultimodalEvaluator:

  """

+ id = "hate_unfairness_multimodal"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  def __init__(self, credential, azure_ai_project):
  self._async_evaluator = _AsyncHateUnfairnessMultimodalEvaluator(credential, azure_ai_project)

  def __call__(self, *, conversation, **kwargs):
  """
  Evaluates hate unfairness content.
+
  :keyword conversation: The conversation contains list of messages to be evaluated.
- Each message should have "role" and "content" keys.
+ Each message should have "role" and "content" keys. It supports single turn only.
  :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: The hate unfairness score.
  :rtype: Dict[str, Union[float, str]]
@@ -2,10 +2,10 @@
  # Copyright (c) Microsoft Corporation. All rights reserved.
  # ---------------------------------------------------------
  from promptflow._utils.async_utils import async_run_allowing_running_loop
- from azure.ai.evaluation._common._experimental import experimental
  from azure.ai.evaluation._common.constants import EvaluationMetrics
  from azure.ai.evaluation._common.utils import validate_conversation
  from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service_multimodal
+ from azure.ai.evaluation._common._experimental import experimental


  @experimental
@@ -22,7 +22,6 @@ class ProtectedMaterialMultimodalEvaluator:
  :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject

  :return: A dictionary containing the evaluation result label and reasoning.
- :rtype: Dict[str, str]

  **Usage Example**

@@ -34,7 +33,7 @@ class ProtectedMaterialMultimodalEvaluator:
  "project_name": "<project_name>",
  }
  eval_fn = ProtectedMaterialMultimodalEvaluator(azure_ai_project)
- result = eval_fn(
+ result = eval_fn(conversation=
  {
  "messages": [
  {
@@ -71,6 +70,9 @@ class ProtectedMaterialMultimodalEvaluator:

  """

+ id = "protected_material_multimodal"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  def __init__(
  self,
  credential,
@@ -82,8 +84,9 @@ class ProtectedMaterialMultimodalEvaluator:
  """
  Evaluates protected materials content.

- :keyword messages: The messages to be evaluated. Each message should have "role" and "content" keys.
- :paramtype messages: ~azure.ai.evaluation.Conversation
+ :keyword conversation: The conversation contains list of messages to be evaluated.
+ Each message should have "role" and "content" keys. It supports single turn only.
+ :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: A dictionary containing a boolean label and reasoning.
  :rtype: Dict[str, str]
  """
@@ -101,8 +104,9 @@ class _AsyncProtectedMaterialMultimodalEvaluator:
  async def __call__(self, *, conversation, **kwargs):
  """
  Evaluates content according to this evaluator's metric.
+
  :keyword conversation: The conversation contains list of messages to be evaluated.
- Each message should have "role" and "content" keys.
+ Each message should have "role" and "content" keys. It supports single turn only.
  :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: The evaluation score computation based on the Content Safety metric (self.metric).
  :rtype: Any
@@ -28,7 +28,7 @@ class SelfHarmMultimodalEvaluator:
  "project_name": "<project_name>",
  }
  eval_fn = SelfHarmMultimodalEvaluator(azure_ai_project)
- result = eval_fn(
+ result = eval_fn(conversation=
  {
  "messages": [
  {
@@ -66,14 +66,18 @@ class SelfHarmMultimodalEvaluator:

  """

+ id = "self_harm_multimodal"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  def __init__(self, credential, azure_ai_project):
  self._async_evaluator = _AsyncSelfHarmMultimodalEvaluator(credential, azure_ai_project)

  def __call__(self, *, conversation, **kwargs):
  """
  Evaluates self harm content.
+
  :keyword conversation: The conversation contains list of messages to be evaluated.
- Each message should have "role" and "content" keys.
+ Each message should have "role" and "content" keys. It supports single turn only.
  :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: The self harm score.
  :rtype: Dict[str, Union[float, str]]
@@ -28,7 +28,7 @@ class SexualMultimodalEvaluator:
  "project_name": "<project_name>",
  }
  eval_fn = SexualMultimodalEvaluator(azure_ai_project)
- result = eval_fn(
+ result = eval_fn(conversation=
  {
  "messages": [
  {
@@ -66,14 +66,18 @@ class SexualMultimodalEvaluator:

  """

+ id = "sexual_multimodal"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  def __init__(self, credential, azure_ai_project):
  self._async_evaluator = _AsyncSexualMultimodalEvaluator(credential, azure_ai_project)

  def __call__(self, *, conversation, **kwargs):
  """
  Evaluates sexual content.
+
  :keyword conversation: The conversation contains list of messages to be evaluated.
- Each message should have "role" and "content" keys.
+ Each message should have "role" and "content" keys. It supports single turn only.
  :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: The sexual score.
  :rtype: Dict[str, Union[float, str]]
@@ -28,7 +28,7 @@ class ViolenceMultimodalEvaluator:
  "project_name": "<project_name>",
  }
  eval_fn = ViolenceMultimodalEvaluator(azure_ai_project)
- result = eval_fn(
+ result = eval_fn(conversation=
  {
  "messages": [
  {
@@ -66,14 +66,18 @@ class ViolenceMultimodalEvaluator:

  """

+ id = "violence_multimodal"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  def __init__(self, credential, azure_ai_project):
  self._async_evaluator = _AsyncViolenceMultimodalEvaluator(credential, azure_ai_project)

  def __call__(self, *, conversation, **kwargs):
  """
  Evaluates violence content.
+
  :keyword conversation: The conversation contains list of messages to be evaluated.
- Each message should have "role" and "content" keys.
+ Each message should have "role" and "content" keys. It supports single turn only.
  :paramtype conversation: ~azure.ai.evaluation.Conversation
  :return: The violence score.
  :rtype: Dict[str, Union[float, str]]
@@ -2,21 +2,26 @@
  # Copyright (c) Microsoft Corporation. All rights reserved.
  # ---------------------------------------------------------

- from typing import Optional
+ from typing import Dict, List, Optional, Union

- from typing_extensions import override
+ from typing_extensions import overload, override

  from azure.ai.evaluation._common._experimental import experimental
  from azure.ai.evaluation._common.constants import EvaluationMetrics
  from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
+ from azure.ai.evaluation._model_configurations import Conversation


  @experimental
- class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
+ class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
  """
- Initialize a protected material evaluator to detect whether protected material
- is present in the AI system's response. The evaluator outputs a Boolean label (`True` or `False`)
- indicating the presence of protected material, along with AI-generated reasoning.
+ Evaluates the protected material score for a given query and response or a multi-turn conversation, with reasoning.
+
+ Protected material is any text that is under copyright, including song lyrics, recipes, and articles. Protected
+ material evaluation leverages the Azure AI Content Safety Protected Material for Text service to perform the
+ classification.
+
+ The protected material score is a boolean value, where True indicates that protected material was detected.

  :param credential: The credential required for connecting to the Azure AI project.
  :type credential: ~azure.core.credentials.TokenCredential
@@ -24,46 +29,64 @@ class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
  resource group, and project name.
  :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject

- :return: A dictionary with a label indicating the presence of protected material and the reasoning.
- :rtype: Dict[str, Union[bool, str]]
-
- **Usage Example**
-
- .. code-block:: python
-
- azure_ai_project = {
- "subscription_id": "<subscription_id>",
- "resource_group_name": "<resource_group_name>",
- "project_name": "<project_name>",
- }
- eval_fn = ProtectedMaterialEvaluator(azure_ai_project)
- result = eval_fn(query="What is the capital of France?", response="Paris.")
-
- **Output Format**
-
- .. code-block:: json
-
- {
- "protected_material_label": false,
- "protected_material_reason": "This query does not contain any protected material."
- }
+ .. admonition:: Example:

+ .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+ :start-after: [START protected_material_evaluator]
+ :end-before: [END protected_material_evaluator]
+ :language: python
+ :dedent: 8
+ :caption: Initialize and call a ProtectedMaterialEvaluator.
  """

+ id = "azureml://registries/azureml/models/Protected-Material-Evaluator/versions/3"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  @override
  def __init__(
  self,
  credential,
  azure_ai_project,
- eval_last_turn: bool = False,
  ):
  super().__init__(
  eval_metric=EvaluationMetrics.PROTECTED_MATERIAL,
  azure_ai_project=azure_ai_project,
  credential=credential,
- eval_last_turn=eval_last_turn,
  )

+ @overload
+ def __call__(
+ self,
+ *,
+ query: str,
+ response: str,
+ ) -> Dict[str, Union[str, bool]]:
+ """Evaluate a given query/response pair for protected material
+
+ :keyword query: The query to be evaluated.
+ :paramtype query: str
+ :keyword response: The response to be evaluated.
+ :paramtype response: str
+ :return: The protected material score.
+ :rtype: Dict[str, Union[str, bool]]
+ """
+
+ @overload
+ def __call__(
+ self,
+ *,
+ conversation: Conversation,
+ ) -> Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]:
+ """Evaluate a conversation for protected material
+
+ :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+ key "messages", and potentially a global context under the key "context". Conversation turns are expected
+ to be dictionaries with keys "content", "role", and possibly "context".
+ :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+ :return: The protected material score.
+ :rtype: Dict[str, Union[str, bool, Dict[str, List[Union[str, bool]]]]]
+ """
+
  @override
  def __call__(
  self,
@@ -77,14 +100,14 @@ class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
  Evaluate if protected material is present in your AI system's response.

  :keyword query: The query to be evaluated.
- :paramtype query: str
+ :paramtype query: Optional[str]
  :keyword response: The response to be evaluated.
- :paramtype response: str
+ :paramtype response: Optional[str]
  :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
  key "messages". Conversation turns are expected
  to be dictionaries with keys "content" and "role".
  :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
  :return: The fluency score.
- :rtype: Union[Dict[str, Union[str, bool]], Dict[str, Union[str, bool, Dict[str, List[Union[str, bool]]]]]]
+ :rtype: Union[Dict[str, Union[str, bool]], Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]]
  """
  return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
@@ -3,7 +3,7 @@
  # ---------------------------------------------------------

  from concurrent.futures import as_completed
- from typing import Callable, Dict, List
+ from typing import Callable, Dict, List, Union

  from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor

@@ -23,42 +23,32 @@ class QAEvaluator:
  :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
  ~azure.ai.evaluation.OpenAIModelConfiguration]
  :return: A callable class that evaluates and generates metrics for "question-answering" scenario.
+ :param kwargs: Additional arguments to pass to the evaluator.
+ :type kwargs: Any

- **Usage**
-
- .. code-block:: python
-
- eval_fn = QAEvaluator(model_config)
- result = qa_eval(
- query="Tokyo is the capital of which country?",
- response="Japan",
- context="Tokyo is the capital of Japan.",
- ground_truth="Japan"
- )
-
- **Output format**
-
- .. code-block:: python
-
- {
- "groundedness": 3.5,
- "relevance": 4.0,
- "coherence": 1.5,
- "fluency": 4.0,
- "similarity": 3.0,
- "gpt_groundedness": 3.5,
- "gpt_relevance": 4.0,
- "gpt_coherence": 1.5,
- "gpt_fluency": 4.0,
- "gpt_similarity": 3.0,
- "f1_score": 0.42
- }
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+ :start-after: [START qa_evaluator]
+ :end-before: [END qa_evaluator]
+ :language: python
+ :dedent: 8
+ :caption: Initialize and call a QAEvaluator.
+
+ .. note::
+
+ To align with our support of a diverse set of models, keys without the `gpt_` prefix has been added.
+ To maintain backwards compatibility, the old keys with the `gpt_` prefix are still be present in the output;
+ however, it is recommended to use the new keys moving forward as the old keys will be deprecated in the future.
  """

- def __init__(self, model_config, parallel: bool = True):
- self._parallel = parallel
+ id = "qa"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+ def __init__(self, model_config, **kwargs):
+ self._parallel = kwargs.pop("_parallel", False)

- self._evaluators: List[Callable[..., Dict[str, float]]] = [
+ self._evaluators: List[Union[Callable[..., Dict[str, Union[str, float]]], Callable[..., Dict[str, float]]]] = [
  GroundednessEvaluator(model_config),
  RelevanceEvaluator(model_config),
  CoherenceEvaluator(model_config),
@@ -79,12 +69,10 @@ class QAEvaluator:
  :paramtype context: str
  :keyword ground_truth: The ground truth to be evaluated.
  :paramtype ground_truth: str
- :keyword parallel: Whether to evaluate in parallel. Defaults to True.
- :paramtype parallel: bool
  :return: The scores for QA scenario.
- :rtype: Dict[str, float]
+ :rtype: Dict[str, Union[str, float]]
  """
- results: Dict[str, float] = {}
+ results: Dict[str, Union[str, float]] = {}
  if self._parallel:
  with ThreadPoolExecutor() as executor:
  futures = {
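The QAEvaluator hunks above follow the same pattern as the multimodal evaluators: the public `parallel` constructor argument is removed (an internal `_parallel` kwarg now defaults to False), and results carry both the new unprefixed keys and the legacy `gpt_`-prefixed keys. A minimal sketch of a 1.0.1-style call, reusing the inputs from the removed docstring example; the model configuration values are placeholders and not taken from the diff.

    from azure.ai.evaluation import QAEvaluator

    # Placeholder Azure OpenAI model configuration; replace with your own values.
    model_config = {
        "azure_endpoint": "<endpoint>",
        "azure_deployment": "<deployment>",
        "api_key": "<api_key>",
    }

    qa_eval = QAEvaluator(model_config)
    result = qa_eval(
        query="Tokyo is the capital of which country?",
        response="Japan",
        context="Tokyo is the capital of Japan.",
        ground_truth="Japan",
    )
    # Expect both key styles in the output, e.g. "relevance" alongside "gpt_relevance".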
@@ -3,62 +3,97 @@
  # ---------------------------------------------------------

  import os
- from typing import Optional
+ from typing import Dict, Union, List

- from typing_extensions import override
+ from typing_extensions import overload, override

+ from azure.ai.evaluation._model_configurations import Conversation
  from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase


  class RelevanceEvaluator(PromptyEvaluatorBase):
  """
- Initialize a relevance evaluator configured for a specific Azure OpenAI model.
+ Evaluates relevance score for a given query and response or a multi-turn conversation, including reasoning.
+
+ The relevance measure assesses the ability of answers to capture the key points of the context.
+ High relevance scores signify the AI system's understanding of the input and its capability to produce coherent
+ and contextually appropriate outputs. Conversely, low relevance scores indicate that generated responses might
+ be off-topic, lacking in context, or insufficient in addressing the user's intended queries. Use the relevance
+ metric when evaluating the AI system's performance in understanding the input and generating contextually
+ appropriate responses.
+
+ Relevance scores range from 1 to 5, with 1 being the worst and 5 being the best.

  :param model_config: Configuration for the Azure OpenAI model.
  :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
  ~azure.ai.evaluation.OpenAIModelConfiguration]

- **Usage**
-
- .. code-block:: python
+ .. admonition:: Example:

- eval_fn = RelevanceEvaluator(model_config)
- result = eval_fn(
- query="What is the capital of Japan?",
- response="The capital of Japan is Tokyo.")
+ .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+ :start-after: [START relevance_evaluator]
+ :end-before: [END relevance_evaluator]
+ :language: python
+ :dedent: 8
+ :caption: Initialize and call a RelevanceEvaluator with a query, response, and context.

- **Output format**
+ .. note::

- .. code-block:: python
-
- {
- "relevance": 3.0,
- "gpt_relevance": 3.0,
- "relevance_reason": "The response is relevant to the query because it provides the correct answer.",
- }
-
- Note: To align with our support of a diverse set of models, a key without the `gpt_` prefix has been added.
- To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
- however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
+ To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+ To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+ however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
  """

  # Constants must be defined within eval's directory to be save/loadable
  _PROMPTY_FILE = "relevance.prompty"
  _RESULT_KEY = "relevance"

+ id = "azureml://registries/azureml/models/Relevance-Evaluator/versions/4"
+ """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
  @override
  def __init__(self, model_config):
  current_dir = os.path.dirname(__file__)
  prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
  super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)

- @override
+ @overload
+ def __call__(
+ self,
+ *,
+ query: str,
+ response: str,
+ ) -> Dict[str, Union[str, float]]:
+ """Evaluate groundedness for given input of query, response, context
+
+ :keyword query: The query to be evaluated.
+ :paramtype query: str
+ :keyword response: The response to be evaluated.
+ :paramtype response: str
+ :return: The relevance score.
+ :rtype: Dict[str, float]
+ """
+
+ @overload
  def __call__(
  self,
  *,
- query: Optional[str] = None,
- response: Optional[str] = None,
- conversation=None,
+ conversation: Conversation,
+ ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+ """Evaluate relevance for a conversation
+
+ :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+ key "messages", and potentially a global context under the key "context". Conversation turns are expected
+ to be dictionaries with keys "content", "role", and possibly "context".
+ :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+ :return: The relevance score.
+ :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+ """
+
+ @override
+ def __call__( # pylint: disable=docstring-missing-param
+ self,
+ *args,
  **kwargs,
  ):
  """Evaluate relevance. Accepts either a query and response for a single evaluation,
@@ -74,7 +109,6 @@ class RelevanceEvaluator(PromptyEvaluatorBase):
  to be dictionaries with keys "content", "role", and possibly "context".
  :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
  :return: The relevance score.
- :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
+ :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
  """
-
- return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
+ return super().__call__(*args, **kwargs)
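The ProtectedMaterialEvaluator and RelevanceEvaluator hunks above show the other notable 1.0.1 pattern: typed `@overload` signatures on `__call__`, accepting either a query/response pair or a multi-turn conversation. A minimal sketch of both call styles, assuming the evaluator is exported from the package top level; the project values and messages are placeholders, and the aggregate output key is an assumption rather than something stated in the diff.

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ProtectedMaterialEvaluator

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }

    eval_fn = ProtectedMaterialEvaluator(
        credential=DefaultAzureCredential(),
        azure_ai_project=azure_ai_project,
    )

    # Overload 1: a single query/response pair returns Dict[str, Union[str, bool]].
    single_result = eval_fn(query="What is the capital of France?", response="Paris.")

    # Overload 2: a multi-turn conversation returns aggregated, per-turn results
    # (the exact result keys are not shown in the diff; only the return type is).
    conversation = {
        "messages": [
            {"role": "user", "content": "Write out the full lyrics of a famous song."},
            {"role": "assistant", "content": "Sorry, I can't share copyrighted lyrics in full."},
        ]
    }
    conversation_result = eval_fn(conversation=conversation)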