azure-ai-evaluation 1.0.0b4__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/evaluation/__init__.py +22 -0
- azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +4 -0
- azure/ai/evaluation/_common/constants.py +5 -0
- azure/ai/evaluation/_common/math.py +73 -2
- azure/ai/evaluation/_common/rai_service.py +250 -62
- azure/ai/evaluation/_common/utils.py +196 -23
- azure/ai/evaluation/_constants.py +7 -6
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
- azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +13 -4
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +19 -6
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +55 -14
- azure/ai/evaluation/_evaluate/_evaluate.py +312 -228
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +7 -6
- azure/ai/evaluation/_evaluate/_utils.py +46 -11
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +17 -18
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +67 -31
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +37 -24
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +21 -9
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +52 -16
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +91 -48
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +100 -26
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +94 -26
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +96 -26
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +97 -26
- azure/ai/evaluation/_evaluators/_eci/_eci.py +31 -4
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +67 -36
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +14 -16
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +106 -34
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +20 -27
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +87 -31
- azure/ai/evaluation/_evaluators/_qa/_qa.py +23 -31
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +72 -36
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +83 -125
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +26 -27
- azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +37 -28
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +94 -33
- azure/ai/evaluation/_exceptions.py +19 -0
- azure/ai/evaluation/_model_configurations.py +83 -15
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/__init__.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +20 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +29 -35
- azure/ai/evaluation/simulator/_constants.py +11 -1
- azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +17 -9
- azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +22 -1
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +90 -35
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +4 -2
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -4
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -1
- azure/ai/evaluation/simulator/_simulator.py +165 -105
- azure/ai/evaluation/simulator/_utils.py +31 -13
- azure_ai_evaluation-1.0.1.dist-info/METADATA +600 -0
- {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/NOTICE.txt +20 -0
- azure_ai_evaluation-1.0.1.dist-info/RECORD +119 -0
- {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/WHEEL +1 -1
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
- azure_ai_evaluation-1.0.0b4.dist-info/METADATA +0 -535
- azure_ai_evaluation-1.0.0b4.dist-info/RECORD +0 -106
- /azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +0 -0
- {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_relevance/_relevance.py

```diff
@@ -3,76 +3,112 @@
 # ---------------------------------------------------------

 import os
-from typing import
+from typing import Dict, Union, List

-from typing_extensions import override
+from typing_extensions import overload, override

+from azure.ai.evaluation._model_configurations import Conversation
 from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase


 class RelevanceEvaluator(PromptyEvaluatorBase):
     """
-
+    Evaluates relevance score for a given query and response or a multi-turn conversation, including reasoning.
+
+    The relevance measure assesses the ability of answers to capture the key points of the context.
+    High relevance scores signify the AI system's understanding of the input and its capability to produce coherent
+    and contextually appropriate outputs. Conversely, low relevance scores indicate that generated responses might
+    be off-topic, lacking in context, or insufficient in addressing the user's intended queries. Use the relevance
+    metric when evaluating the AI system's performance in understanding the input and generating contextually
+    appropriate responses.
+
+    Relevance scores range from 1 to 5, with 1 being the worst and 5 being the best.

     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
         ~azure.ai.evaluation.OpenAIModelConfiguration]

-
-
-    .. code-block:: python
-
-        eval_fn = RelevanceEvaluator(model_config)
-        result = eval_fn(
-            query="What is the capital of Japan?",
-            response="The capital of Japan is Tokyo.",
-            context="Tokyo is Japan's capital, known for its blend of traditional culture \
-            and technological advancements.")
+    .. admonition:: Example:

-
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START relevance_evaluator]
+            :end-before: [END relevance_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a RelevanceEvaluator with a query, response, and context.

-    ..
+    .. note::

-
-
-
+        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """

     # Constants must be defined within eval's directory to be save/loadable
-
-
+    _PROMPTY_FILE = "relevance.prompty"
+    _RESULT_KEY = "relevance"
+
+    id = "azureml://registries/azureml/models/Relevance-Evaluator/versions/4"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""

     @override
-    def __init__(self, model_config
+    def __init__(self, model_config):
         current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.
-        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self.
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)

-    @
+    @overload
+    def __call__(
+        self,
+        *,
+        query: str,
+        response: str,
+    ) -> Dict[str, Union[str, float]]:
+        """Evaluate groundedness for given input of query, response, context
+
+        :keyword query: The query to be evaluated.
+        :paramtype query: str
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :return: The relevance score.
+        :rtype: Dict[str, float]
+        """
+
+    @overload
     def __call__(
         self,
         *,
-
-
-
-
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+        """Evaluate relevance for a conversation
+
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The relevance score.
+        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+        """
+
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
         **kwargs,
     ):
-        """Evaluate relevance. Accepts either a
+        """Evaluate relevance. Accepts either a query and response for a single evaluation,
         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
         the evaluator will aggregate the results of each turn.

-        :keyword query: The query to be evaluated.
+        :keyword query: The query to be evaluated. Mutually exclusive with the `conversation` parameter.
         :paramtype query: Optional[str]
-        :keyword response: The response to be evaluated.
+        :keyword response: The response to be evaluated. Mutually exclusive with the `conversation` parameter.
         :paramtype response: Optional[str]
-        :keyword context: The context to be evaluated.
-        :paramtype context: Optional[str]
         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
             key "messages", and potentially a global context under the key "context". Conversation turns are expected
             to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The relevance score.
-        :rtype: Dict[str, float]
+        :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
         """
-        return super().__call__(
+        return super().__call__(*args, **kwargs)
```
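The practical effect of this hunk is that `RelevanceEvaluator` drops the old `context` argument and instead exposes two typed overloads: a single-turn `query`/`response` call and a `conversation` call. The sketch below illustrates how the new surface might be used; the `model_config` values are placeholders, and the result key names (`relevance` plus the legacy `gpt_`-prefixed key) are inferred from `_RESULT_KEY` and the docstring note above rather than verified against the released wheel.

```python
# Hedged usage sketch for the 1.0.1 RelevanceEvaluator surface shown above.
# The endpoint, deployment, and key below are placeholders, not real values.
from azure.ai.evaluation import RelevanceEvaluator

model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",  # placeholder
    "azure_deployment": "<your-deployment>",                       # placeholder
    "api_key": "<your-api-key>",                                   # placeholder
}

relevance = RelevanceEvaluator(model_config)

# Overload 1: single-turn evaluation with query + response (no `context` argument anymore).
single_turn = relevance(
    query="What is the capital of Japan?",
    response="The capital of Japan is Tokyo.",
)

# Overload 2: multi-turn evaluation; turns live under "messages" and results are
# aggregated across turns, per the docstring above.
conversation = {
    "messages": [
        {"role": "user", "content": "What is the capital of Japan?"},
        {"role": "assistant", "content": "The capital of Japan is Tokyo."},
    ]
}
multi_turn = relevance(conversation=conversation)
```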
azure/ai/evaluation/_evaluators/_relevance/relevance.prompty

```diff
@@ -5,7 +5,7 @@ model:
   api: chat
   parameters:
     temperature: 0.0
-    max_tokens:
+    max_tokens: 800
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -17,48 +17,84 @@ inputs:
     type: string
   response:
     type: string
-  context:
-    type: string

 ---
 system:
-
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include QUERY and RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
 user:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-question
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-question
-
-
+# Definition
+**Relevance** refers to how effectively a response addresses a question. It assesses the accuracy, completeness, and direct relevance of the response based solely on the given information.
+
+# Ratings
+## [Relevance: 1] (Irrelevant Response)
+**Definition:** The response is unrelated to the question. It provides information that is off-topic and does not attempt to address the question posed.
+
+**Examples:**
+**Query:** What is the team preparing for?
+**Response:** I went grocery shopping yesterday evening.
+
+**Query:** When will the company's new product line launch?
+**Response:** International travel can be very rewarding and educational.
+
+## [Relevance: 2] (Incorrect Response)
+**Definition:** The response attempts to address the question but includes incorrect information. It provides a response that is factually wrong based on the provided information.
+
+**Examples:**
+**Query:** When was the merger between the two firms finalized?
+**Response:** The merger was finalized on April 10th.
+
+**Query:** Where and when will the solar eclipse be visible?
+**Response:** The solar eclipse will be visible in Asia on December 14th.
+
+## [Relevance: 3] (Incomplete Response)
+**Definition:** The response addresses the question but omits key details necessary for a full understanding. It provides a partial response that lacks essential information.
+
+**Examples:**
+**Query:** What type of food does the new restaurant offer?
+**Response:** The restaurant offers Italian food like pasta.
+
+**Query:** What topics will the conference cover?
+**Response:** The conference will cover renewable energy and climate change.
+
+## [Relevance: 4] (Complete Response)
+**Definition:** The response fully addresses the question with accurate and complete information. It includes all essential details required for a comprehensive understanding, without adding any extraneous information.
+
+**Examples:**
+**Query:** What type of food does the new restaurant offer?
+**Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto.
+
+**Query:** What topics will the conference cover?
+**Response:** The conference will cover renewable energy, climate change, and sustainability practices.
+
+## [Relevance: 5] (Comprehensive Response with Insights)
+**Definition:** The response not only fully and accurately addresses the question but also includes additional relevant insights or elaboration. It may explain the significance, implications, or provide minor inferences that enhance understanding.
+
+**Examples:**
+**Query:** What type of food does the new restaurant offer?
+**Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto, aiming to provide customers with an authentic Italian dining experience.
+
+**Query:** What topics will the conference cover?
+**Response:** The conference will cover renewable energy, climate change, and sustainability practices, bringing together global experts to discuss these critical issues.
+
+
+
+# Data
+QUERY: {{query}}
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE in relation to the QUERY based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
```
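The rewritten prompty no longer takes a `context` input and asks the model to return its reasoning, explanation, and integer score between `<S0>`, `<S1>`, and `<S2>` tags. The helper below is purely illustrative and is not part of the package; it only shows one way such a tagged completion could be pulled apart under that assumption.

```python
# Illustrative helper, not part of azure-ai-evaluation: extracts the <S0>/<S1>/<S2>
# fields requested by the prompty above from a raw model completion.
import re
from typing import Dict


def parse_tagged_scores(llm_output: str) -> Dict[str, str]:
    """Extract the chain of thought (S0), explanation (S1), and score (S2)."""
    fields = {"S0": "thought_chain", "S1": "explanation", "S2": "score"}
    parsed: Dict[str, str] = {}
    for tag, name in fields.items():
        match = re.search(rf"<{tag}>(.*?)</{tag}>", llm_output, re.DOTALL)
        if match:
            parsed[name] = match.group(1).strip()
    return parsed


sample = "<S0>Let's think step by step: the response names the capital.</S0><S1>Fully answers the query.</S1><S2>5</S2>"
print(parse_tagged_scores(sample))
# {'thought_chain': "Let's think step by step: the response names the capital.",
#  'explanation': 'Fully answers the query.', 'score': '5'}
```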
azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py

```diff
@@ -2,104 +2,31 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------

-import json
 import logging
-import math
 import os
-import
-from
+from typing import Dict, List, Union
+from typing_extensions import overload, override

-from
-from
-
-from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
-
-from ..._common.math import list_mean_nan_safe
-from ..._common.utils import construct_prompty_model_config, validate_model_config
+from azure.ai.evaluation._evaluators._common._base_prompty_eval import PromptyEvaluatorBase
+from azure.ai.evaluation._model_configurations import Conversation

 logger = logging.getLogger(__name__)

-try:
-    from .._user_agent import USER_AGENT
-except ImportError:
-    USER_AGENT = "None"

+class RetrievalEvaluator(PromptyEvaluatorBase[Union[str, float]]):
+    """
+    Evaluates retrieval score for a given query and context or a multi-turn conversation, including reasoning.

-
-
-    PROMPTY_FILE = "retrieval.prompty"
-    LLM_CALL_TIMEOUT = 600
-    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+    The retrieval measure assesses the AI system's performance in retrieving information
+    for additional context (e.g. a RAG scenario).

-
-        prompty_model_config = construct_prompty_model_config(
-            model_config,
-            self.DEFAULT_OPEN_API_VERSION,
-            USER_AGENT,
-        )
+    Retrieval scores range from 1 to 5, with 1 being the worst and 5 being the best.

-
-
-
-
-
-        # Extract queries, responses and contexts from conversation
-        queries = []
-        responses = []
-        contexts = []
-
-        for each_turn in conversation:
-            role = each_turn["role"]
-            if role == "user":
-                queries.append(each_turn["content"])
-            elif role == "assistant":
-                responses.append(each_turn["content"])
-                if "context" in each_turn and "citations" in each_turn["context"]:
-                    citations = json.dumps(each_turn["context"]["citations"])
-                    contexts.append(citations)
-
-        # Evaluate each turn
-        per_turn_scores = []
-        history = []
-        for turn_num, query in enumerate(queries):
-            try:
-                query = query if turn_num < len(queries) else ""
-                answer = responses[turn_num] if turn_num < len(responses) else ""
-                context = contexts[turn_num] if turn_num < len(contexts) else ""
-
-                history.append({"user": query, "assistant": answer})
-
-                llm_output = await self._flow(
-                    query=query, history=history, documents=context, timeout=self.LLM_CALL_TIMEOUT, **kwargs
-                )
-                score = math.nan
-                if llm_output:
-                    parsed_score_response = re.findall(r"\d+", llm_output.split("# Result")[-1].strip())
-                    if len(parsed_score_response) > 0:
-                        score = float(parsed_score_response[0].replace("'", "").strip())
-
-                per_turn_scores.append(score)
-
-            except Exception as e:  # pylint: disable=broad-exception-caught
-                logger.warning(
-                    "Evaluator %s failed for turn %s with exception: %s", self.__class__.__name__, turn_num + 1, e
-                )
-
-                per_turn_scores.append(math.nan)
-
-        return {
-            "gpt_retrieval": list_mean_nan_safe(per_turn_scores),
-            "evaluation_per_turn": {
-                "gpt_retrieval": {
-                    "score": per_turn_scores,
-                }
-            },
-        }
-
-
-class RetrievalEvaluator:
-    """
-    Initialize an evaluator configured for a specific Azure OpenAI model.
+    High retrieval scores indicate that the AI system has successfully extracted and ranked
+    the most relevant information at the top, without introducing bias from external knowledge
+    and ignoring factual correctness. Conversely, low retrieval scores suggest that the AI system
+    has failed to surface the most relevant context chunks at the top of the list
+    and/or introduced bias and ignored factual correctness.

     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
@@ -107,48 +34,79 @@ class RetrievalEvaluator:
     :return: A function that evaluates and generates metrics for "chat" scenario.
     :rtype: Callable

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        result = chat_eval(conversation=conversation)
-
-    **Output format**
-
-    .. code-block:: python
-
-        {
-            "gpt_retrieval": 3.0
-            "evaluation_per_turn": {
-                "gpt_retrieval": {
-                    "score": [1.0, 2.0, 3.0]
-                }
-            }
-        }
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START retrieval_evaluator]
+            :end-before: [END retrieval_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a RetrievalEvaluator.
+
+    .. note::
+
+        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """

-
-
+    _PROMPTY_FILE = "retrieval.prompty"
+    _RESULT_KEY = "retrieval"

-
-
+    id = "azureml://registries/azureml/models/Retrieval-Evaluator/versions/1"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    @override
+    def __init__(self, model_config):  # pylint: disable=super-init-not-called
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+
+    @overload
+    def __call__(
+        self,
+        *,
+        query: str,
+        context: str,
+    ) -> Dict[str, Union[str, float]]:
+        """Evaluates retrieval for a given a query and context
+
+        :keyword query: The query to be evaluated. Mutually exclusive with `conversation` parameter.
+        :paramtype query: Optional[str]
+        :keyword context: The context to be evaluated. Mutually exclusive with `conversation` parameter.
+        :paramtype context: Optional[str]
+        :return: The scores for Chat scenario.
+        :rtype: Dict[str, Union[str, float]]
+        """
+
+    @overload
+    def __call__(
+        self,
+        *,
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+        """Evaluates retrieval for a for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.

         :keyword conversation: The conversation to be evaluated.
-        :paramtype conversation:
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The scores for Chat scenario.
-        :rtype:
+        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
         """
-        return async_run_allowing_running_loop(self._async_evaluator, conversation=conversation, **kwargs)

-
-
+    @override
+    def __call__(self, *args, **kwargs):  # pylint: disable=docstring-missing-param
+        """Evaluates retrieval score chat scenario. Accepts either a query and context for a single evaluation,
+        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.
+
+        :keyword query: The query to be evaluated. Mutually exclusive with `conversation` parameter.
+        :paramtype query: Optional[str]
+        :keyword context: The context to be evaluated. Mutually exclusive with `conversation` parameter.
+        :paramtype context: Optional[str]
+        :keyword conversation: The conversation to be evaluated.
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The scores for Chat scenario.
+        :rtype: :rtype: Dict[str, Union[float, Dict[str, List[str, float]]]]
+        """
+        return super().__call__(*args, **kwargs)
```
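As with relevance, the net change is that a prompty-backed `RetrievalEvaluator` replaces the old asynchronous chat-only implementation and now accepts either a single `query`/`context` pair or a `conversation`. Below is a hedged sketch of the new call pattern; the `model_config` values are placeholders, and the per-turn `context` placement follows the conversation docstring above rather than a verified schema.

```python
# Hedged usage sketch for the 1.0.1 RetrievalEvaluator surface shown above.
# The endpoint, deployment, and key below are placeholders, not real values.
from azure.ai.evaluation import RetrievalEvaluator

model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",  # placeholder
    "azure_deployment": "<your-deployment>",                       # placeholder
    "api_key": "<your-api-key>",                                   # placeholder
}

retrieval = RetrievalEvaluator(model_config)

# Overload 1: single evaluation of how well the retrieved context serves the query.
single = retrieval(
    query="What is the capital of Japan?",
    context="Tokyo is Japan's capital, known for its blend of traditional culture and technological advancements.",
)

# Overload 2: multi-turn evaluation; per-turn scores are aggregated, and both the new
# "retrieval" key and the legacy "gpt_retrieval" key are expected in the output.
conversation = {
    "messages": [
        {"role": "user", "content": "What is the capital of Japan?"},
        {
            "role": "assistant",
            "content": "The capital of Japan is Tokyo.",
            "context": "Tokyo is Japan's capital, known for its blend of traditional culture and technological advancements.",
        },
    ]
}
multi = retrieval(conversation=conversation)
```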