azure-ai-evaluation 1.0.0b5__py3-none-any.whl → 1.1.0__py3-none-any.whl
- azure/ai/evaluation/_azure/__init__.py +3 -0
- azure/ai/evaluation/_azure/_clients.py +188 -0
- azure/ai/evaluation/_azure/_models.py +227 -0
- azure/ai/evaluation/_azure/_token_manager.py +118 -0
- azure/ai/evaluation/_common/_experimental.py +4 -0
- azure/ai/evaluation/_common/math.py +62 -2
- azure/ai/evaluation/_common/rai_service.py +110 -50
- azure/ai/evaluation/_common/utils.py +50 -16
- azure/ai/evaluation/_constants.py +2 -0
- azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +9 -0
- azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +13 -3
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +12 -1
- azure/ai/evaluation/_evaluate/_eval_run.py +38 -43
- azure/ai/evaluation/_evaluate/_evaluate.py +62 -131
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +2 -1
- azure/ai/evaluation/_evaluate/_utils.py +72 -38
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +16 -17
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +60 -29
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +88 -6
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +16 -3
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +39 -10
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +58 -52
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +79 -34
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +73 -34
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +74 -33
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +76 -34
- azure/ai/evaluation/_evaluators/_eci/_eci.py +28 -3
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +57 -26
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +13 -15
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +68 -30
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +17 -20
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +10 -8
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +0 -2
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +6 -2
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +10 -6
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +6 -2
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +6 -2
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +6 -2
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +57 -34
- azure/ai/evaluation/_evaluators/_qa/_qa.py +25 -37
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +63 -29
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +76 -161
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +24 -25
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +65 -67
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +26 -20
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +74 -40
- azure/ai/evaluation/_exceptions.py +2 -0
- azure/ai/evaluation/_http_utils.py +6 -4
- azure/ai/evaluation/_model_configurations.py +65 -14
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -4
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -4
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -4
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +17 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +57 -47
- azure/ai/evaluation/simulator/_constants.py +11 -1
- azure/ai/evaluation/simulator/_conversation/__init__.py +128 -7
- azure/ai/evaluation/simulator/_conversation/_conversation.py +0 -1
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +16 -8
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +12 -1
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +3 -1
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +48 -4
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +1 -0
- azure/ai/evaluation/simulator/_simulator.py +54 -45
- azure/ai/evaluation/simulator/_utils.py +25 -7
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.1.0.dist-info}/METADATA +240 -327
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.1.0.dist-info}/RECORD +71 -68
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.1.0.dist-info}/NOTICE.txt +0 -0
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.1.0.dist-info}/WHEEL +0 -0
- {azure_ai_evaluation-1.0.0b5.dist-info → azure_ai_evaluation-1.1.0.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py

@@ -28,7 +28,7 @@ class SelfHarmMultimodalEvaluator:
             "project_name": "<project_name>",
         }
         eval_fn = SelfHarmMultimodalEvaluator(azure_ai_project)
-        result = eval_fn(
+        result = eval_fn(conversation=
         {
             "messages": [
                 {
@@ -66,14 +66,18 @@ class SelfHarmMultimodalEvaluator:

     """

+    id = "self_harm_multimodal"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     def __init__(self, credential, azure_ai_project):
         self._async_evaluator = _AsyncSelfHarmMultimodalEvaluator(credential, azure_ai_project)

     def __call__(self, *, conversation, **kwargs):
         """
         Evaluates self harm content.
+
         :keyword conversation: The conversation contains list of messages to be evaluated.
-            Each message should have "role" and "content" keys.
+            Each message should have "role" and "content" keys. It supports single turn only.
         :paramtype conversation: ~azure.ai.evaluation.Conversation
         :return: The self harm score.
         :rtype: Dict[str, Union[float, str]]
azure/ai/evaluation/_evaluators/_multimodal/_sexual.py

@@ -28,7 +28,7 @@ class SexualMultimodalEvaluator:
             "project_name": "<project_name>",
         }
         eval_fn = SexualMultimodalEvaluator(azure_ai_project)
-        result = eval_fn(
+        result = eval_fn(conversation=
         {
             "messages": [
                 {
@@ -66,14 +66,18 @@ class SexualMultimodalEvaluator:

     """

+    id = "sexual_multimodal"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     def __init__(self, credential, azure_ai_project):
         self._async_evaluator = _AsyncSexualMultimodalEvaluator(credential, azure_ai_project)

     def __call__(self, *, conversation, **kwargs):
         """
         Evaluates sexual content.
+
         :keyword conversation: The conversation contains list of messages to be evaluated.
-            Each message should have "role" and "content" keys.
+            Each message should have "role" and "content" keys. It supports single turn only.
         :paramtype conversation: ~azure.ai.evaluation.Conversation
         :return: The sexual score.
         :rtype: Dict[str, Union[float, str]]
azure/ai/evaluation/_evaluators/_multimodal/_violence.py

@@ -28,7 +28,7 @@ class ViolenceMultimodalEvaluator:
             "project_name": "<project_name>",
         }
         eval_fn = ViolenceMultimodalEvaluator(azure_ai_project)
-        result = eval_fn(
+        result = eval_fn(conversation=
        {
             "messages": [
                 {
@@ -66,14 +66,18 @@ class ViolenceMultimodalEvaluator:

     """

+    id = "violence_multimodal"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     def __init__(self, credential, azure_ai_project):
         self._async_evaluator = _AsyncViolenceMultimodalEvaluator(credential, azure_ai_project)

     def __call__(self, *, conversation, **kwargs):
         """
         Evaluates violence content.
+
         :keyword conversation: The conversation contains list of messages to be evaluated.
-            Each message should have "role" and "content" keys.
+            Each message should have "role" and "content" keys. It supports single turn only.
         :paramtype conversation: ~azure.ai.evaluation.Conversation
         :return: The violence score.
         :rtype: Dict[str, Union[float, str]]
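
The three multimodal hunks above make the same pair of changes: the docstring example now passes the conversation through an explicit conversation= keyword, and each class gains a class-level id string intended only for evaluation in the cloud. A minimal sketch of the new calling convention, assuming the evaluator is importable from the package root and that azure.identity.DefaultAzureCredential is an acceptable credential; the project values and message contents are placeholders:

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import SelfHarmMultimodalEvaluator

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }

    # __init__ takes (credential, azure_ai_project), per the diff above.
    eval_fn = SelfHarmMultimodalEvaluator(DefaultAzureCredential(), azure_ai_project)

    # The conversation is now passed as an explicit keyword argument; the
    # updated docstring notes that only a single turn is supported.
    result = eval_fn(
        conversation={
            "messages": [
                {"role": "user", "content": "<user turn, e.g. text plus an image part>"},
                {"role": "assistant", "content": "<assistant turn>"},
            ]
        }
    )

The same pattern applies unchanged to SexualMultimodalEvaluator and ViolenceMultimodalEvaluator.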
azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py

@@ -2,21 +2,26 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------

-from typing import Optional
+from typing import Dict, List, Optional, Union

-from typing_extensions import override
+from typing_extensions import overload, override

 from azure.ai.evaluation._common._experimental import experimental
 from azure.ai.evaluation._common.constants import EvaluationMetrics
 from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
+from azure.ai.evaluation._model_configurations import Conversation


 @experimental
-class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
+class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
     """
-
-
-
+    Evaluates the protected material score for a given query and response or a multi-turn conversation, with reasoning.
+
+    Protected material is any text that is under copyright, including song lyrics, recipes, and articles. Protected
+    material evaluation leverages the Azure AI Content Safety Protected Material for Text service to perform the
+    classification.
+
+    The protected material score is a boolean value, where True indicates that protected material was detected.

     :param credential: The credential required for connecting to the Azure AI project.
     :type credential: ~azure.core.credentials.TokenCredential
@@ -24,46 +29,64 @@ class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
         resource group, and project name.
     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject

-
-    :rtype: Dict[str, Union[bool, str]]
-
-    **Usage Example**
-
-    .. code-block:: python
-
-        azure_ai_project = {
-            "subscription_id": "<subscription_id>",
-            "resource_group_name": "<resource_group_name>",
-            "project_name": "<project_name>",
-        }
-        eval_fn = ProtectedMaterialEvaluator(azure_ai_project)
-        result = eval_fn(query="What is the capital of France?", response="Paris.")
-
-    **Output Format**
-
-    .. code-block:: json
-
-        {
-            "protected_material_label": false,
-            "protected_material_reason": "This query does not contain any protected material."
-        }
+    .. admonition:: Example:

+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START protected_material_evaluator]
+            :end-before: [END protected_material_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a ProtectedMaterialEvaluator.
     """

+    id = "azureml://registries/azureml/models/Protected-Material-Evaluator/versions/3"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     @override
     def __init__(
         self,
         credential,
         azure_ai_project,
-        eval_last_turn: bool = False,
     ):
         super().__init__(
             eval_metric=EvaluationMetrics.PROTECTED_MATERIAL,
             azure_ai_project=azure_ai_project,
             credential=credential,
-            eval_last_turn=eval_last_turn,
         )

+    @overload
+    def __call__(
+        self,
+        *,
+        query: str,
+        response: str,
+    ) -> Dict[str, Union[str, bool]]:
+        """Evaluate a given query/response pair for protected material
+
+        :keyword query: The query to be evaluated.
+        :paramtype query: str
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :return: The protected material score.
+        :rtype: Dict[str, Union[str, bool]]
+        """
+
+    @overload
+    def __call__(
+        self,
+        *,
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]:
+        """Evaluate a conversation for protected material
+
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The protected material score.
+        :rtype: Dict[str, Union[str, bool, Dict[str, List[Union[str, bool]]]]]
+        """
+
     @override
     def __call__(
         self,
@@ -77,14 +100,14 @@ class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
         Evaluate if protected material is present in your AI system's response.

         :keyword query: The query to be evaluated.
-        :paramtype query: str
+        :paramtype query: Optional[str]
         :keyword response: The response to be evaluated.
-        :paramtype response: str
+        :paramtype response: Optional[str]
         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
             key "messages". Conversation turns are expected
             to be dictionaries with keys "content" and "role".
         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The fluency score.
-        :rtype: Union[Dict[str, Union[str, bool]], Dict[str, Union[
+        :rtype: Union[Dict[str, Union[str, bool]], Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]]
         """
         return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
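
Together these hunks replace the inline usage example with a literalinclude directive, drop the eval_last_turn constructor parameter, and add typed @overload signatures so callers can pass either a query/response pair or a whole conversation. A minimal sketch of both call shapes, assuming the class is importable from the package root and azure.identity.DefaultAzureCredential is an acceptable credential; all values are placeholders:

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ProtectedMaterialEvaluator

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }
    evaluator = ProtectedMaterialEvaluator(DefaultAzureCredential(), azure_ai_project)

    # Overload 1: a single query/response pair -> Dict[str, Union[str, bool]]
    pair_result = evaluator(
        query="What is the capital of France?",
        response="Paris.",
    )

    # Overload 2: a multi-turn conversation; the overload's return type in the
    # diff (Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]) indicates
    # aggregated results plus per-turn lists.
    conversation_result = evaluator(
        conversation={
            "messages": [
                {"role": "user", "content": "Recite a famous poem."},
                {"role": "assistant", "content": "I can summarize it instead."},
            ]
        }
    )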
azure/ai/evaluation/_evaluators/_qa/_qa.py

@@ -3,7 +3,7 @@
 # ---------------------------------------------------------

 from concurrent.futures import as_completed
-from typing import Callable, Dict, List
+from typing import Callable, Dict, List, Union

 from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor

@@ -23,42 +23,32 @@ class QAEvaluator:
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
         ~azure.ai.evaluation.OpenAIModelConfiguration]
     :return: A callable class that evaluates and generates metrics for "question-answering" scenario.
+    :param kwargs: Additional arguments to pass to the evaluator.
+    :type kwargs: Any

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    .. code-block:: python
-
-        {
-            "groundedness": 3.5,
-            "relevance": 4.0,
-            "coherence": 1.5,
-            "fluency": 4.0,
-            "similarity": 3.0,
-            "gpt_groundedness": 3.5,
-            "gpt_relevance": 4.0,
-            "gpt_coherence": 1.5,
-            "gpt_fluency": 4.0,
-            "gpt_similarity": 3.0,
-            "f1_score": 0.42
-        }
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START qa_evaluator]
+            :end-before: [END qa_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a QAEvaluator.
+
+    .. note::
+
+        To align with our support of a diverse set of models, keys without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old keys with the `gpt_` prefix are still be present in the output;
+        however, it is recommended to use the new keys moving forward as the old keys will be deprecated in the future.
     """

-
-
+    id = "qa"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    def __init__(self, model_config, **kwargs):
+        self._parallel = kwargs.pop("_parallel", False)

-        self._evaluators: List[Callable[..., Dict[str, float]]] = [
+        self._evaluators: List[Union[Callable[..., Dict[str, Union[str, float]]], Callable[..., Dict[str, float]]]] = [
             GroundednessEvaluator(model_config),
             RelevanceEvaluator(model_config),
             CoherenceEvaluator(model_config),
@@ -79,12 +69,10 @@ class QAEvaluator:
         :paramtype context: str
         :keyword ground_truth: The ground truth to be evaluated.
         :paramtype ground_truth: str
-        :keyword parallel: Whether to evaluate in parallel. Defaults to True.
-        :paramtype parallel: bool
         :return: The scores for QA scenario.
-        :rtype: Dict[str, float]
+        :rtype: Dict[str, Union[str, float]]
         """
-        results: Dict[str, float] = {}
+        results: Dict[str, Union[str, float]] = {}
         if self._parallel:
             with ThreadPoolExecutor() as executor:
                 futures = {
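
Two behavioral details stand out in these hunks: the public parallel keyword, previously documented as defaulting to True, disappears from __call__, and the constructor now pops a private _parallel kwarg defaulting to False, so parallel fan-out across the sub-evaluators becomes opt-in. A minimal sketch, assuming an AzureOpenAIModelConfiguration-style dict for model_config; all values are placeholders:

    from azure.ai.evaluation import QAEvaluator

    # Placeholder model configuration; see the model_config type referenced above.
    model_config = {
        "azure_endpoint": "<endpoint>",
        "azure_deployment": "<deployment>",
        "api_key": "<api_key>",
    }

    # Parallel execution is now opt-in via the private "_parallel" kwarg
    # (default False), replacing the old public parallel=True keyword.
    qa_eval = QAEvaluator(model_config, _parallel=True)

    result = qa_eval(
        query="What is the capital of France?",
        response="Paris.",
        context="France's capital city is Paris.",
        ground_truth="Paris",
    )
    # Per the updated docstring, result maps metric names to str or float
    # values, with both the new keys (e.g. "relevance") and the legacy
    # "gpt_"-prefixed keys retained for backwards compatibility.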
azure/ai/evaluation/_evaluators/_relevance/_relevance.py

@@ -3,62 +3,97 @@
 # ---------------------------------------------------------

 import os
-from typing import
+from typing import Dict, Union, List

-from typing_extensions import override
+from typing_extensions import overload, override

+from azure.ai.evaluation._model_configurations import Conversation
 from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase


 class RelevanceEvaluator(PromptyEvaluatorBase):
     """
-
+    Evaluates relevance score for a given query and response or a multi-turn conversation, including reasoning.
+
+    The relevance measure assesses the ability of answers to capture the key points of the context.
+    High relevance scores signify the AI system's understanding of the input and its capability to produce coherent
+    and contextually appropriate outputs. Conversely, low relevance scores indicate that generated responses might
+    be off-topic, lacking in context, or insufficient in addressing the user's intended queries. Use the relevance
+    metric when evaluating the AI system's performance in understanding the input and generating contextually
+    appropriate responses.
+
+    Relevance scores range from 1 to 5, with 1 being the worst and 5 being the best.

     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
         ~azure.ai.evaluation.OpenAIModelConfiguration]

-
-
-    .. code-block:: python
+    .. admonition:: Example:

-
-
-
-
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START relevance_evaluator]
+            :end-before: [END relevance_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a RelevanceEvaluator with a query, response, and context.

-
+    .. note::

-
-
-
-        "relevance": 3.0,
-        "gpt_relevance": 3.0,
-        "relevance_reason": "The response is relevant to the query because it provides the correct answer.",
-    }
-
-    Note: To align with our support of a diverse set of models, a key without the `gpt_` prefix has been added.
-    To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
-    however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
+        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """

     # Constants must be defined within eval's directory to be save/loadable
     _PROMPTY_FILE = "relevance.prompty"
     _RESULT_KEY = "relevance"

+    id = "azureml://registries/azureml/models/Relevance-Evaluator/versions/4"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     @override
     def __init__(self, model_config):
         current_dir = os.path.dirname(__file__)
         prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
         super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)

-    @
+    @overload
+    def __call__(
+        self,
+        *,
+        query: str,
+        response: str,
+    ) -> Dict[str, Union[str, float]]:
+        """Evaluate groundedness for given input of query, response, context
+
+        :keyword query: The query to be evaluated.
+        :paramtype query: str
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :return: The relevance score.
+        :rtype: Dict[str, float]
+        """
+
+    @overload
     def __call__(
         self,
         *,
-
-
-        conversation
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+        """Evaluate relevance for a conversation
+
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The relevance score.
+        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+        """
+
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
         **kwargs,
     ):
         """Evaluate relevance. Accepts either a query and response for a single evaluation,
@@ -74,7 +109,6 @@ class RelevanceEvaluator(PromptyEvaluatorBase):
             to be dictionaries with keys "content", "role", and possibly "context".
         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The relevance score.
-        :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
+        :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
         """
-
-        return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
+        return super().__call__(*args, **kwargs)