azure-ai-evaluation 1.0.0b3__py3-none-any.whl → 1.0.0b5__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Potentially problematic release: this version of azure-ai-evaluation has been flagged as potentially problematic; see the registry listing for details.
- azure/ai/evaluation/__init__.py +23 -1
- azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +20 -9
- azure/ai/evaluation/_common/constants.py +9 -2
- azure/ai/evaluation/_common/math.py +29 -0
- azure/ai/evaluation/_common/rai_service.py +222 -93
- azure/ai/evaluation/_common/utils.py +328 -19
- azure/ai/evaluation/_constants.py +16 -8
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +33 -17
- azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +14 -7
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +22 -4
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +35 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +47 -14
- azure/ai/evaluation/_evaluate/_evaluate.py +370 -188
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +15 -16
- azure/ai/evaluation/_evaluate/_utils.py +77 -25
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +1 -1
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +16 -10
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +76 -46
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +26 -19
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +62 -25
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +68 -36
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +67 -46
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +33 -4
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +33 -4
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +33 -4
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +33 -4
- azure/ai/evaluation/_evaluators/_eci/_eci.py +7 -5
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +14 -6
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +22 -21
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +1 -1
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +51 -16
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +3 -7
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +130 -0
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +57 -0
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +96 -0
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +120 -0
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +96 -0
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +96 -0
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +96 -0
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +46 -13
- azure/ai/evaluation/_evaluators/_qa/_qa.py +11 -6
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +23 -20
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +126 -80
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +2 -2
- azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +150 -0
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +32 -15
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +36 -10
- azure/ai/evaluation/_exceptions.py +26 -6
- azure/ai/evaluation/_http_utils.py +203 -132
- azure/ai/evaluation/_model_configurations.py +23 -6
- azure/ai/evaluation/_vendor/__init__.py +3 -0
- azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +328 -0
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +63 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +63 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/__init__.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +5 -0
- azure/ai/evaluation/simulator/_adversarial_simulator.py +88 -60
- azure/ai/evaluation/simulator/_conversation/__init__.py +13 -12
- azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -4
- azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +24 -66
- azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +26 -5
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +98 -95
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +67 -21
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +28 -11
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +68 -24
- azure/ai/evaluation/simulator/_model_tools/models.py +10 -10
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -9
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -5
- azure/ai/evaluation/simulator/_simulator.py +222 -169
- azure/ai/evaluation/simulator/_tracing.py +4 -4
- azure/ai/evaluation/simulator/_utils.py +6 -6
- {azure_ai_evaluation-1.0.0b3.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/METADATA +237 -52
- azure_ai_evaluation-1.0.0b5.dist-info/NOTICE.txt +70 -0
- azure_ai_evaluation-1.0.0b5.dist-info/RECORD +120 -0
- {azure_ai_evaluation-1.0.0b3.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/WHEEL +1 -1
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
- azure_ai_evaluation-1.0.0b3.dist-info/RECORD +0 -98
- {azure_ai_evaluation-1.0.0b3.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/top_level.txt +0 -0
Selected hunks from the diff follow.

azure/ai/evaluation/_evaluators/_fluency/fluency.prompty:

```diff
@@ -5,7 +5,7 @@ model:
   api: chat
   parameters:
     temperature: 0.0
-    max_tokens:
+    max_tokens: 800
    top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -13,44 +13,74 @@ model:
     type: text
 
 inputs:
-  query:
-    type: string
   response:
     type: string
 
 ---
 system:
-
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include a RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
 user:
-(old lines 25-56 removed; their content was not captured in this diff view)
+# Definition
+**Fluency** refers to the effectiveness and clarity of written communication, focusing on grammatical accuracy, vocabulary range, sentence complexity, coherence, and overall readability. It assesses how smoothly ideas are conveyed and how easily the text can be understood by the reader.
+
+# Ratings
+## [Fluency: 1] (Emergent Fluency)
+**Definition:** The response shows minimal command of the language. It contains pervasive grammatical errors, extremely limited vocabulary, and fragmented or incoherent sentences. The message is largely incomprehensible, making understanding very difficult.
+
+**Examples:**
+**Response:** Free time I. Go park. Not fun. Alone.
+
+**Response:** Like food pizza. Good cheese eat.
+
+## [Fluency: 2] (Basic Fluency)
+**Definition:** The response communicates simple ideas but has frequent grammatical errors and limited vocabulary. Sentences are short and may be improperly constructed, leading to partial understanding. Repetition and awkward phrasing are common.
+
+**Examples:**
+**Response:** I like play soccer. I watch movie. It fun.
+
+**Response:** My town small. Many people. We have market.
+
+## [Fluency: 3] (Competent Fluency)
+**Definition:** The response clearly conveys ideas with occasional grammatical errors. Vocabulary is adequate but not extensive. Sentences are generally correct but may lack complexity and variety. The text is coherent, and the message is easily understood with minimal effort.
+
+**Examples:**
+**Response:** I'm planning to visit friends and maybe see a movie together.
+
+**Response:** I try to eat healthy food and exercise regularly by jogging.
+
+## [Fluency: 4] (Proficient Fluency)
+**Definition:** The response is well-articulated with good control of grammar and a varied vocabulary. Sentences are complex and well-structured, demonstrating coherence and cohesion. Minor errors may occur but do not affect overall understanding. The text flows smoothly, and ideas are connected logically.
+
+**Examples:**
+**Response:** My interest in mathematics and problem-solving inspired me to become an engineer, as I enjoy designing solutions that improve people's lives.
+
+**Response:** Environmental conservation is crucial because it protects ecosystems, preserves biodiversity, and ensures natural resources are available for future generations.
+
+## [Fluency: 5] (Exceptional Fluency)
+**Definition:** The response demonstrates an exceptional command of language with sophisticated vocabulary and complex, varied sentence structures. It is coherent, cohesive, and engaging, with precise and nuanced expression. Grammar is flawless, and the text reflects a high level of eloquence and style.
+
+**Examples:**
+**Response:** Globalization exerts a profound influence on cultural diversity by facilitating unprecedented cultural exchange while simultaneously risking the homogenization of distinct cultural identities, which can diminish the richness of global heritage.
+
+**Response:** Technology revolutionizes modern education by providing interactive learning platforms, enabling personalized learning experiences, and connecting students worldwide, thereby transforming how knowledge is acquired and shared.
+
+
+# Data
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
```
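The rewritten fluency prompt instructs the model to return its reasoning, explanation, and score between `<S0>`, `<S1>`, and `<S2>` tags. As a rough sketch of how a caller could pull those fields apart (this parser is hypothetical and is not the SDK's actual post-processing code), something like the following would work:

```python
import re
from typing import Dict

def parse_tagged_output(llm_output: str) -> Dict[str, str]:
    """Extract the <S0>/<S1>/<S2> sections requested by the prompty above.

    Hypothetical helper for illustration only; the tag names come from the
    prompt, but azure-ai-evaluation's real parsing logic is not shown here.
    """
    fields = {"S0": "thought_chain", "S1": "explanation", "S2": "score"}
    parsed = {}
    for tag, name in fields.items():
        match = re.search(rf"<{tag}>(.*?)</{tag}>", llm_output, re.DOTALL)
        parsed[name] = match.group(1).strip() if match else ""
    return parsed

sample = "<S0>Let's think step by step: ...</S0><S1>Varied, correct prose.</S1><S2>4</S2>"
print(parse_tagged_output(sample))
# {'thought_chain': "Let's think step by step: ...", 'explanation': 'Varied, correct prose.', 'score': '4'}
```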
azure/ai/evaluation/_evaluators/_gleu/_gleu.py:

```diff
@@ -61,7 +61,7 @@ class GleuScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The GLEU score.
-        :rtype:
+        :rtype: Dict[str, float]
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
```
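The `:rtype:` correction documents that the evaluator returns a dictionary rather than a bare float. A minimal usage sketch; the `gleu_score` result key is assumed from the evaluator's naming convention and may differ:

```python
from azure.ai.evaluation import GleuScoreEvaluator

gleu = GleuScoreEvaluator()
# Per the corrected docstring, the call returns Dict[str, float].
result = gleu(
    ground_truth="The cat sat on the mat.",
    response="The cat is on the mat.",
)
print(result)  # e.g. {"gleu_score": 0.6}  -- key name assumed
```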
azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py (some removed lines were truncated in the captured view and are kept as captured):

```diff
@@ -3,9 +3,17 @@
 # ---------------------------------------------------------
 import os
 from typing import Optional
+
 from typing_extensions import override
+from promptflow.core import AsyncPrompty
 
 from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
+from ..._common.utils import construct_prompty_model_config, validate_model_config
+
+try:
+    from ..._user_agent import USER_AGENT
+except ImportError:
+    USER_AGENT = "None"
 
 
 class GroundednessEvaluator(PromptyEvaluatorBase):
@@ -13,8 +21,8 @@ class GroundednessEvaluator(PromptyEvaluatorBase):
     Initialize a groundedness evaluator configured for a specific Azure OpenAI model.
 
     :param model_config: Configuration for the Azure OpenAI model.
-    :type model_config: Union[~azure.ai.
-        ~azure.ai.
+    :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+        ~azure.ai.evaluation.OpenAIModelConfiguration]
 
     **Usage**
 
@@ -31,41 +39,68 @@ class GroundednessEvaluator(PromptyEvaluatorBase):
     .. code-block:: python
 
         {
-            "
+            "groundedness": 5,
+            "gpt_groundedness": 5,
         }
+
+    Note: To align with our support of a diverse set of models, a key without the `gpt_` prefix has been added.
+    To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+    however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """
 
-
-
+    _PROMPTY_FILE_NO_QUERY = "groundedness_without_query.prompty"
+    _PROMPTY_FILE_WITH_QUERY = "groundedness_with_query.prompty"
+    _RESULT_KEY = "groundedness"
+    _OPTIONAL_PARAMS = ["query"]
 
     @override
-    def __init__(self, model_config
+    def __init__(self, model_config):
         current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.
-
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_NO_QUERY)  # Default to no query
+
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+        self._model_config = model_config
+        # Needs to be set because it's used in call method to re-validate prompt if `query` is provided
 
     @override
     def __call__(
         self,
         *,
+        query: Optional[str] = None,
         response: Optional[str] = None,
         context: Optional[str] = None,
-        conversation
-        **kwargs
+        conversation=None,
+        **kwargs,
     ):
-        """Evaluate
+        """Evaluate groundedness. Accepts either a query, response, and context for a single evaluation,
         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
         the evaluator will aggregate the results of each turn.
 
-        :keyword
+        :keyword query: The query to be evaluated. Mutually exclusive with `conversation`. Optional parameter for use
+            with the `response` and `context` parameters. If provided, a different prompt template will be used for
+            evaluation.
+        :paramtype query: Optional[str]
+        :keyword response: The response to be evaluated. Mutually exclusive with the `conversation` parameter.
         :paramtype response: Optional[str]
-        :keyword context: The context to be evaluated.
+        :keyword context: The context to be evaluated. Mutually exclusive with the `conversation` parameter.
         :paramtype context: Optional[str]
         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
             key "messages", and potentially a global context under the key "context". Conversation turns are expected
             to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The relevance score.
-        :rtype:
+        :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
         """
-
+
+        if query:
+            current_dir = os.path.dirname(__file__)
+            prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_WITH_QUERY)
+            self._prompty_file = prompty_path
+            prompty_model_config = construct_prompty_model_config(
+                validate_model_config(self._model_config),
+                self._DEFAULT_OPEN_API_VERSION,
+                USER_AGENT,
+            )
+            self._flow = AsyncPrompty.load(source=self._prompty_file, model=prompty_model_config)
+
+        return super().__call__(query=query, response=response, context=context, conversation=conversation, **kwargs)
```
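Taken together, these hunks let the evaluator switch templates at call time: `__init__` defaults to `groundedness_without_query.prompty`, and passing `query` to `__call__` re-loads `groundedness_with_query.prompty` before delegating to the base class. A hedged usage sketch; the `model_config` keys follow the `AzureOpenAIModelConfiguration` shape and all values are placeholders:

```python
from azure.ai.evaluation import GroundednessEvaluator

model_config = {
    "azure_endpoint": "https://<resource>.openai.azure.com",  # placeholder
    "api_key": "<api-key>",                                   # placeholder
    "azure_deployment": "<deployment-name>",                  # placeholder
}

groundedness = GroundednessEvaluator(model_config)

# Without `query`: evaluates against groundedness_without_query.prompty (the default).
result = groundedness(
    response="Tokyo is the capital of Japan.",
    context="Japan's capital city is Tokyo.",
)

# With `query`: __call__ swaps in groundedness_with_query.prompty first.
result_with_query = groundedness(
    query="What is the capital of Japan?",
    response="Tokyo is the capital of Japan.",
    context="Japan's capital city is Tokyo.",
)
print(result_with_query)  # e.g. {"groundedness": 5.0, "gpt_groundedness": 5.0}
```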
azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty (new file):

```diff
@@ -0,0 +1,113 @@
+---
+name: Groundedness
+description: Evaluates groundedness score for RAG scenario
+model:
+  api: chat
+  parameters:
+    temperature: 0.0
+    max_tokens: 800
+    top_p: 1.0
+    presence_penalty: 0
+    frequency_penalty: 0
+    response_format:
+      type: text
+
+inputs:
+  query:
+    type: string
+  response:
+    type: string
+  context:
+    type: string
+
+
+---
+system:
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include CONTEXT, QUERY, and RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+user:
+# Definition
+**Groundedness** refers to how well an answer is anchored in the provided context, evaluating its relevance, accuracy, and completeness based exclusively on that context. It assesses the extent to which the answer directly and fully addresses the question without introducing unrelated or incorrect information. The scale ranges from 1 to 5, with higher numbers indicating greater groundedness.
+
+# Ratings
+## [Groundedness: 1] (Completely Unrelated Response)
+**Definition:** An answer that does not relate to the question or the context in any way. It fails to address the topic, provides irrelevant information, or introduces completely unrelated subjects.
+
+**Examples:**
+**Context:** The company's annual meeting will be held next Thursday.
+**Query:** When is the company's annual meeting?
+**Response:** I enjoy hiking in the mountains during summer.
+
+**Context:** The new policy aims to reduce carbon emissions by 20% over the next five years.
+**Query:** What is the goal of the new policy?
+**Response:** My favorite color is blue.
+
+## [Groundedness: 2] (Related Topic but Does Not Respond to the Query)
+**Definition:** An answer that relates to the general topic of the context but does not answer the specific question asked. It may mention concepts from the context but fails to provide a direct or relevant response.
+
+**Examples:**
+**Context:** The museum will exhibit modern art pieces from various local artists.
+**Query:** What kind of art will be exhibited at the museum?
+**Response:** Museums are important cultural institutions.
+
+**Context:** The new software update improves battery life and performance.
+**Query:** What does the new software update improve?
+**Response:** Software updates can sometimes fix bugs.
+
+## [Groundedness: 3] (Attempts to Respond but Contains Incorrect Information)
+**Definition:** An answer that attempts to respond to the question but includes incorrect information not supported by the context. It may misstate facts, misinterpret the context, or provide erroneous details.
+
+**Examples:**
+**Context:** The festival starts on June 5th and features international musicians.
+**Query:** When does the festival start?
+**Response:** The festival starts on July 5th and features local artists.
+
+**Context:** The recipe requires two eggs and one cup of milk.
+**Query:** How many eggs are needed for the recipe?
+**Response:** You need three eggs for the recipe.
+
+## [Groundedness: 4] (Partially Correct Response)
+**Definition:** An answer that provides a correct response to the question but is incomplete or lacks specific details mentioned in the context. It captures some of the necessary information but omits key elements needed for a full understanding.
+
+**Examples:**
+**Context:** The bookstore offers a 15% discount to students and a 10% discount to senior citizens.
+**Query:** What discount does the bookstore offer to students?
+**Response:** Students get a discount at the bookstore.
+
+**Context:** The company's headquarters are located in Berlin, Germany.
+**Query:** Where are the company's headquarters?
+**Response:** The company's headquarters are in Germany.
+
+## [Groundedness: 5] (Fully Correct and Complete Response)
+**Definition:** An answer that thoroughly and accurately responds to the question, including all relevant details from the context. It directly addresses the question with precise information, demonstrating complete understanding without adding extraneous information.
+
+**Examples:**
+**Context:** The author released her latest novel, 'The Silent Echo', on September 1st.
+**Query:** When was 'The Silent Echo' released?
+**Response:** 'The Silent Echo' was released on September 1st.
+
+**Context:** Participants must register by May 31st to be eligible for early bird pricing.
+**Query:** By what date must participants register to receive early bird pricing?
+**Response:** Participants must register by May 31st to receive early bird pricing.
+
+
+# Data
+CONTEXT: {{context}}
+QUERY: {{query}}
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE in relation to the CONTEXT and QUERY based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
```
azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty (new file):

```diff
@@ -0,0 +1,99 @@
+---
+name: Groundedness
+description: Evaluates groundedness score for RAG scenario
+model:
+  api: chat
+  parameters:
+    temperature: 0.0
+    max_tokens: 800
+    top_p: 1.0
+    presence_penalty: 0
+    frequency_penalty: 0
+    response_format:
+      type: text
+
+inputs:
+  response:
+    type: string
+  context:
+    type: string
+
+---
+system:
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include CONTEXT and RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+user:
+# Definition
+**Groundedness** refers to how faithfully a response adheres to the information provided in the CONTEXT, ensuring that all content is directly supported by the context without introducing unsupported information or omitting critical details. It evaluates the fidelity and precision of the response in relation to the source material.
+
+# Ratings
+## [Groundedness: 1] (Completely Ungrounded Response)
+**Definition:** The response is entirely unrelated to the CONTEXT, introducing topics or information that have no connection to the provided material.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter.
+**Response:** I enjoy playing soccer on weekends with my friends.
+
+**Context:** The new smartphone model features a larger display and improved battery life.
+**Response:** The history of ancient Egypt is fascinating and full of mysteries.
+
+## [Groundedness: 2] (Contradictory Response)
+**Definition:** The response directly contradicts or misrepresents the information provided in the CONTEXT.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter.
+**Response:** The company's profits decreased by 20% in the last quarter.
+
+**Context:** The new smartphone model features a larger display and improved battery life.
+**Response:** The new smartphone model has a smaller display and shorter battery life.
+
+## [Groundedness: 3] (Accurate Response with Unsupported Additions)
+**Definition:** The response accurately includes information from the CONTEXT but adds details, opinions, or explanations that are not supported by the provided material.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter.
+**Response:** The company's profits increased by 20% in the last quarter due to their aggressive marketing strategy.
+
+**Context:** The new smartphone model features a larger display and improved battery life.
+**Response:** The new smartphone model features a larger display, improved battery life, and comes with a free case.
+
+## [Groundedness: 4] (Incomplete Response Missing Critical Details)
+**Definition:** The response contains information from the CONTEXT but omits essential details that are necessary for a comprehensive understanding of the main point.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter, marking the highest growth rate in its history.
+**Response:** The company's profits increased by 20% in the last quarter.
+
+**Context:** The new smartphone model features a larger display, improved battery life, and an upgraded camera system.
+**Response:** The new smartphone model features a larger display and improved battery life.
+
+## [Groundedness: 5] (Fully Grounded and Complete Response)
+**Definition:** The response is entirely based on the CONTEXT, accurately and thoroughly conveying all essential information without introducing unsupported details or omitting critical points.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter, marking the highest growth rate in its history.
+**Response:** The company's profits increased by 20% in the last quarter, marking the highest growth rate in its history.
+
+**Context:** The new smartphone model features a larger display, improved battery life, and an upgraded camera system.
+**Response:** The new smartphone model features a larger display, improved battery life, and an upgraded camera system.
+
+
+# Data
+CONTEXT: {{context}}
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE in relation to the CONTEXT based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
```
azure/ai/evaluation/_evaluators/_meteor/_meteor.py:

```diff
@@ -1,11 +1,10 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-import nltk
 from nltk.translate.meteor_score import meteor_score
 from promptflow._utils.async_utils import async_run_allowing_running_loop
 
-from azure.ai.evaluation._common.utils import nltk_tokenize
+from azure.ai.evaluation._common.utils import nltk_tokenize, ensure_nltk_data_downloaded
 
 
 class _AsyncMeteorScoreEvaluator:
@@ -14,10 +13,7 @@ class _AsyncMeteorScoreEvaluator:
         self._beta = beta
         self._gamma = gamma
 
-        try:
-            nltk.find("corpora/wordnet.zip")
-        except LookupError:
-            nltk.download("wordnet")
+        ensure_nltk_data_downloaded()
 
     async def __call__(self, *, ground_truth: str, response: str, **kwargs):
         reference_tokens = nltk_tokenize(ground_truth)
@@ -87,7 +83,7 @@ class MeteorScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The METEOR score.
-        :rtype:
+        :rtype: Dict[str, float]
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
```
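The inline NLTK bootstrap moves into a shared `ensure_nltk_data_downloaded` helper in `_common/utils.py`, whose body is not shown in this diff. Based on the removed code, a minimal equivalent would look like the sketch below (the real helper may fetch additional corpora):

```python
import nltk

def ensure_nltk_data_downloaded() -> None:
    """Sketch reconstructed from the removed inline code above; not the
    actual azure/ai/evaluation/_common/utils.py implementation."""
    try:
        # The removed code called nltk.find(...); nltk.data.find is the
        # documented lookup API for installed corpora.
        nltk.data.find("corpora/wordnet.zip")
    except LookupError:
        nltk.download("wordnet")
```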
azure/ai/evaluation/_evaluators/_multimodal/__init__.py (new file):

```diff
@@ -0,0 +1,20 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from ._content_safety_multimodal import ContentSafetyMultimodalEvaluator
+from ._content_safety_multimodal_base import ContentSafetyMultimodalEvaluatorBase
+from ._hate_unfairness import HateUnfairnessMultimodalEvaluator
+from ._self_harm import SelfHarmMultimodalEvaluator
+from ._sexual import SexualMultimodalEvaluator
+from ._violence import ViolenceMultimodalEvaluator
+from ._protected_material import ProtectedMaterialMultimodalEvaluator
+
+__all__ = [
+    "ContentSafetyMultimodalEvaluator",
+    "ContentSafetyMultimodalEvaluatorBase",
+    "ViolenceMultimodalEvaluator",
+    "SexualMultimodalEvaluator",
+    "SelfHarmMultimodalEvaluator",
+    "HateUnfairnessMultimodalEvaluator",
+    "ProtectedMaterialMultimodalEvaluator",
+]
```
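Each of these evaluators is constructed from a credential plus an `azure_ai_project` scope, matching the constructor calls visible in `_content_safety_multimodal.py` below. A hedged construction sketch (the private import path is what this wheel lays out; the `+23 -1` change to the top-level `azure/ai/evaluation/__init__.py` suggests the names are also re-exported from `azure.ai.evaluation`):

```python
from azure.identity import DefaultAzureCredential
from azure.ai.evaluation._evaluators._multimodal import ViolenceMultimodalEvaluator

# Placeholder project scope; the keys follow the AzureAIProject shape used
# in the docstrings of these evaluators.
azure_ai_project = {
    "subscription_id": "<subscription_id>",
    "resource_group_name": "<resource_group_name>",
    "project_name": "<project_name>",
}

violence_eval = ViolenceMultimodalEvaluator(
    credential=DefaultAzureCredential(),
    azure_ai_project=azure_ai_project,
)
```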
azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py (new file):

```diff
@@ -0,0 +1,130 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+import logging
+from concurrent.futures import as_completed
+from typing import Callable, Dict, List, Union
+
+from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
+from azure.ai.evaluation._common._experimental import experimental
+from azure.ai.evaluation._common.utils import validate_conversation
+from ._hate_unfairness import HateUnfairnessMultimodalEvaluator
+from ._self_harm import SelfHarmMultimodalEvaluator
+from ._sexual import SexualMultimodalEvaluator
+from ._violence import ViolenceMultimodalEvaluator
+
+
+logger = logging.getLogger(__name__)
+
+
+@experimental
+class ContentSafetyMultimodalEvaluator:
+    """
+    Initialize a content safety multimodal evaluator configured to evaluate content safety metrics
+    in a multimodal scenario.
+
+    :param credential: The credential required for connecting to the Azure AI project.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param azure_ai_project: The scope of the Azure AI project, containing the subscription ID,
+        resource group, and project name.
+    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+    :param parallel: Specifies whether to use parallel execution for evaluators.
+        If True, evaluators execute in parallel; otherwise, they execute sequentially. Defaults to True.
+    :type parallel: bool
+
+    :return: A function that evaluates multimodal chat messages and generates content safety metrics.
+    :rtype: Callable
+
+    **Usage Example**
+
+    .. code-block:: python
+
+        azure_ai_project = {
+            "subscription_id": "<subscription_id>",
+            "resource_group_name": "<resource_group_name>",
+            "project_name": "<project_name>",
+        }
+        eval_fn = ContentSafetyMultimodalEvaluator(azure_ai_project)
+        result = eval_fn(
+            {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": "What's in this image?"
+                            },
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": "<image url or base64 encoded image>"
+                                }
+                            }
+                        ]
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "This picture shows an astronaut standing in the desert."
+                    }
+                ]
+            }
+        )
+
+    **Output Format**
+
+    .. code-block:: json
+
+        {
+            "violence": "Medium",
+            "violence_score": 5.0,
+            "violence_reason": "Some reason",
+            "sexual": "Medium",
+            "sexual_score": 5.0,
+            "sexual_reason": "Some reason",
+            "self_harm": "Medium",
+            "self_harm_score": 5.0,
+            "self_harm_reason": "Some reason",
+            "hate_unfairness": "Medium",
+            "hate_unfairness_score": 5.0,
+            "hate_unfairness_reason": "Some reason"
+        }
+
+    """
+
+    def __init__(self, credential, azure_ai_project, parallel: bool = False):
+        self._parallel = parallel
+        self._evaluators: List[Callable[..., Dict[str, Union[str, float]]]] = [
+            ViolenceMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+            SexualMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+            SelfHarmMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+            HateUnfairnessMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+        ]
+
+    def __call__(self, *, conversation, **kwargs):
+        """
+        Evaluates content-safety metrics for list of messages.
+        :keyword conversation: The conversation contains list of messages to be evaluated.
+            Each message should have "role" and "content" keys.
+        :paramtype conversation: ~azure.ai.evaluation.Conversation
+        :return: The evaluation score based on the Content Safety Metrics.
+        :rtype: Dict[str, Union[float, str]]
+        """
+        # validate inputs
+        validate_conversation(conversation)
+        results: Dict[str, Union[str, float]] = {}
+        if self._parallel:
+            with ThreadPoolExecutor() as executor:
+                futures = {
+                    executor.submit(evaluator, conversation=conversation, **kwargs): evaluator
+                    for evaluator in self._evaluators
+                }
+
+                for future in as_completed(futures):
+                    results.update(future.result())
+        else:
+            for evaluator in self._evaluators:
+                result = evaluator(conversation=conversation, **kwargs)
+                results.update(result)
+
+        return results
```
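Two details worth noting in this file: the docstring says `parallel` defaults to True while the signature defaults it to False, and the parallel path uses promptflow's `ThreadPoolExecutorWithContext`, which, as the name suggests, carries the tracing context into worker threads. A hedged sketch of the parallel path, reusing the placeholder `azure_ai_project` dict from above:

```python
from azure.identity import DefaultAzureCredential

# Conversation payload following the "messages" shape from the docstring above.
conversation = {
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {"type": "image_url", "image_url": {"url": "<image url or base64 encoded image>"}},
            ],
        },
        {"role": "assistant", "content": "This picture shows an astronaut standing in the desert."},
    ]
}

eval_fn = ContentSafetyMultimodalEvaluator(
    credential=DefaultAzureCredential(),
    azure_ai_project=azure_ai_project,  # placeholder scope, as defined earlier
    parallel=True,  # fan the four sub-evaluators out across threads
)
result = eval_fn(conversation=conversation)
# `result` merges the violence/sexual/self_harm/hate_unfairness keys shown in Output Format.
```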