azure-ai-evaluation 1.0.0b4__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/evaluation/__init__.py +22 -0
- azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +4 -0
- azure/ai/evaluation/_common/constants.py +5 -0
- azure/ai/evaluation/_common/math.py +73 -2
- azure/ai/evaluation/_common/rai_service.py +250 -62
- azure/ai/evaluation/_common/utils.py +196 -23
- azure/ai/evaluation/_constants.py +7 -6
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
- azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +13 -4
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +19 -6
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +55 -14
- azure/ai/evaluation/_evaluate/_evaluate.py +312 -228
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +7 -6
- azure/ai/evaluation/_evaluate/_utils.py +46 -11
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +17 -18
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +67 -31
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +37 -24
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +21 -9
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +52 -16
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +91 -48
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +100 -26
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +94 -26
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +96 -26
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +97 -26
- azure/ai/evaluation/_evaluators/_eci/_eci.py +31 -4
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +67 -36
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +14 -16
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +106 -34
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +20 -27
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +87 -31
- azure/ai/evaluation/_evaluators/_qa/_qa.py +23 -31
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +72 -36
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +83 -125
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +26 -27
- azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +37 -28
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +94 -33
- azure/ai/evaluation/_exceptions.py +19 -0
- azure/ai/evaluation/_model_configurations.py +83 -15
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/__init__.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +20 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +29 -35
- azure/ai/evaluation/simulator/_constants.py +11 -1
- azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +17 -9
- azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +22 -1
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +90 -35
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +4 -2
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -4
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -1
- azure/ai/evaluation/simulator/_simulator.py +165 -105
- azure/ai/evaluation/simulator/_utils.py +31 -13
- azure_ai_evaluation-1.0.1.dist-info/METADATA +600 -0
- {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/NOTICE.txt +20 -0
- azure_ai_evaluation-1.0.1.dist-info/RECORD +119 -0
- {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/WHEEL +1 -1
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
- azure_ai_evaluation-1.0.0b4.dist-info/METADATA +0 -535
- azure_ai_evaluation-1.0.0b4.dist-info/RECORD +0 -106
- /azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +0 -0
- {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/top_level.txt +0 -0

--- /dev/null
+++ b/azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty
@@ -0,0 +1,99 @@
+---
+name: Groundedness
+description: Evaluates groundedness score for RAG scenario
+model:
+  api: chat
+  parameters:
+    temperature: 0.0
+    max_tokens: 800
+    top_p: 1.0
+    presence_penalty: 0
+    frequency_penalty: 0
+    response_format:
+      type: text
+
+inputs:
+  response:
+    type: string
+  context:
+    type: string
+
+---
+system:
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include CONTEXT and RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+user:
+# Definition
+**Groundedness** refers to how faithfully a response adheres to the information provided in the CONTEXT, ensuring that all content is directly supported by the context without introducing unsupported information or omitting critical details. It evaluates the fidelity and precision of the response in relation to the source material.
+
+# Ratings
+## [Groundedness: 1] (Completely Ungrounded Response)
+**Definition:** The response is entirely unrelated to the CONTEXT, introducing topics or information that have no connection to the provided material.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter.
+**Response:** I enjoy playing soccer on weekends with my friends.
+
+**Context:** The new smartphone model features a larger display and improved battery life.
+**Response:** The history of ancient Egypt is fascinating and full of mysteries.
+
+## [Groundedness: 2] (Contradictory Response)
+**Definition:** The response directly contradicts or misrepresents the information provided in the CONTEXT.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter.
+**Response:** The company's profits decreased by 20% in the last quarter.
+
+**Context:** The new smartphone model features a larger display and improved battery life.
+**Response:** The new smartphone model has a smaller display and shorter battery life.
+
+## [Groundedness: 3] (Accurate Response with Unsupported Additions)
+**Definition:** The response accurately includes information from the CONTEXT but adds details, opinions, or explanations that are not supported by the provided material.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter.
+**Response:** The company's profits increased by 20% in the last quarter due to their aggressive marketing strategy.
+
+**Context:** The new smartphone model features a larger display and improved battery life.
+**Response:** The new smartphone model features a larger display, improved battery life, and comes with a free case.
+
+## [Groundedness: 4] (Incomplete Response Missing Critical Details)
+**Definition:** The response contains information from the CONTEXT but omits essential details that are necessary for a comprehensive understanding of the main point.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter, marking the highest growth rate in its history.
+**Response:** The company's profits increased by 20% in the last quarter.
+
+**Context:** The new smartphone model features a larger display, improved battery life, and an upgraded camera system.
+**Response:** The new smartphone model features a larger display and improved battery life.
+
+## [Groundedness: 5] (Fully Grounded and Complete Response)
+**Definition:** The response is entirely based on the CONTEXT, accurately and thoroughly conveying all essential information without introducing unsupported details or omitting critical points.
+
+**Examples:**
+**Context:** The company's profits increased by 20% in the last quarter, marking the highest growth rate in its history.
+**Response:** The company's profits increased by 20% in the last quarter, marking the highest growth rate in its history.
+
+**Context:** The new smartphone model features a larger display, improved battery life, and an upgraded camera system.
+**Response:** The new smartphone model features a larger display, improved battery life, and an upgraded camera system.
+
+
+# Data
+CONTEXT: {{context}}
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE in relation to the CONTEXT based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
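
The prompty instructs the model to wrap its three outputs in <S0>, <S1>, and <S2> tags. A minimal sketch of how such a reply could be parsed; this is an illustration only, not the SDK's actual parsing code, and parse_groundedness_output is a hypothetical helper name:

    import re
    from typing import Dict, Optional

    def parse_groundedness_output(llm_text: str) -> Dict[str, Optional[str]]:
        """Extract the chain of thought (<S0>), explanation (<S1>), and score (<S2>)
        from a reply formatted per the prompty's output instructions."""
        def tag(name: str) -> Optional[str]:
            match = re.search(rf"<{name}>(.*?)</{name}>", llm_text, re.DOTALL)
            return match.group(1).strip() if match else None

        return {"thought_chain": tag("S0"), "explanation": tag("S1"), "score": tag("S2")}

    sample = "<S0>Let's think step by step: ...</S0><S1>Fully supported.</S1><S2>5</S2>"
    print(parse_groundedness_output(sample)["score"])  # -> "5"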

--- a/azure/ai/evaluation/_evaluators/_meteor/_meteor.py
+++ b/azure/ai/evaluation/_evaluators/_meteor/_meteor.py
@@ -1,11 +1,10 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-import nltk
 from nltk.translate.meteor_score import meteor_score
 from promptflow._utils.async_utils import async_run_allowing_running_loop
 
-from azure.ai.evaluation._common.utils import nltk_tokenize
+from azure.ai.evaluation._common.utils import nltk_tokenize, ensure_nltk_data_downloaded
 
 
 class _AsyncMeteorScoreEvaluator:
@@ -14,10 +13,7 @@ class _AsyncMeteorScoreEvaluator:
         self._beta = beta
         self._gamma = gamma
 
-        try:
-            nltk.find("corpora/wordnet.zip")
-        except LookupError:
-            nltk.download("wordnet")
+        ensure_nltk_data_downloaded()
 
     async def __call__(self, *, ground_truth: str, response: str, **kwargs):
         reference_tokens = nltk_tokenize(ground_truth)
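
The four deleted lines above show what the new ensure_nltk_data_downloaded helper replaces. A sketch of what that helper plausibly does, reconstructed from the removed inline logic; the real implementation lives in _common/utils.py and may cover more corpora and error handling (nltk.data.find is the documented lookup API):

    import nltk

    def ensure_nltk_data_downloaded():
        # Probe the local NLTK data cache for WordNet and fetch it on a miss,
        # as the removed inline code did.
        try:
            nltk.data.find("corpora/wordnet.zip")
        except LookupError:
            nltk.download("wordnet")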

@@ -38,7 +34,7 @@ class _AsyncMeteorScoreEvaluator:
 
 class MeteorScoreEvaluator:
     """
-
+    Calculates the METEOR score for a given response and ground truth.
 
     The METEOR (Metric for Evaluation of Translation with Explicit Ordering) score grader evaluates generated text by
     comparing it to reference texts, focusing on precision, recall, and content alignment. It addresses limitations of
@@ -46,6 +42,12 @@ class MeteorScoreEvaluator:
     word stems to more accurately capture meaning and language variations. In addition to machine translation and
     text summarization, paraphrase detection is an optimal use case for the METEOR score.
 
+    Use the METEOR score when you want a more linguistically informed evaluation metric that captures not only
+    n-gram overlap but also accounts for synonyms, stemming, and word order. This is particularly useful for evaluating
+    tasks like machine translation, text summarization, and text generation.
+
+    The METEOR score ranges from 0 to 1, with 1 indicating a perfect match.
+
     :param alpha: The METEOR score alpha parameter. Default is 0.9.
     :type alpha: float
     :param beta: The METEOR score beta parameter. Default is 3.0.
@@ -53,28 +55,19 @@ class MeteorScoreEvaluator:
     :param gamma: The METEOR score gamma parameter. Default is 0.5.
     :type gamma: float
 
-
-
-    .. code-block:: python
-
-        eval_fn = MeteorScoreEvaluator(
-            alpha=0.9,
-            beta=3.0,
-            gamma=0.5
-        )
-        result = eval_fn(
-            response="Tokyo is the capital of Japan.",
-            ground_truth="The capital of Japan is Tokyo.")
+    .. admonition:: Example:
 
-
-
-
-
-
-
-        }
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START meteor_score_evaluator]
+            :end-before: [END meteor_score_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a MeteorScoreEvaluator with alpha of 0.8.
     """
 
+    id = "azureml://registries/azureml/models/Meteor-Score-Evaluator/versions/3"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     def __init__(self, alpha: float = 0.9, beta: float = 3.0, gamma: float = 0.5):
         self._async_evaluator = _AsyncMeteorScoreEvaluator(alpha=alpha, beta=beta, gamma=gamma)
 
@@ -87,7 +80,7 @@ class MeteorScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The METEOR score.
-        :rtype:
+        :rtype: Dict[str, float]
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
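
The docstring example removed in the hunk above still documents the evaluator's call shape; reassembled as a runnable sketch (the exact key of the returned dict is not shown in this diff, only its :rtype:):

    from azure.ai.evaluation import MeteorScoreEvaluator

    # Constructor arguments and inputs taken from the removed docstring example.
    eval_fn = MeteorScoreEvaluator(alpha=0.9, beta=3.0, gamma=0.5)
    result = eval_fn(
        response="Tokyo is the capital of Japan.",
        ground_truth="The capital of Japan is Tokyo.",
    )
    print(result)  # a Dict[str, float], per the corrected :rtype: above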

--- /dev/null
+++ b/azure/ai/evaluation/_evaluators/_multimodal/__init__.py
@@ -0,0 +1,20 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from ._content_safety_multimodal import ContentSafetyMultimodalEvaluator
+from ._content_safety_multimodal_base import ContentSafetyMultimodalEvaluatorBase
+from ._hate_unfairness import HateUnfairnessMultimodalEvaluator
+from ._self_harm import SelfHarmMultimodalEvaluator
+from ._sexual import SexualMultimodalEvaluator
+from ._violence import ViolenceMultimodalEvaluator
+from ._protected_material import ProtectedMaterialMultimodalEvaluator
+
+__all__ = [
+    "ContentSafetyMultimodalEvaluator",
+    "ContentSafetyMultimodalEvaluatorBase",
+    "ViolenceMultimodalEvaluator",
+    "SexualMultimodalEvaluator",
+    "SelfHarmMultimodalEvaluator",
+    "HateUnfairnessMultimodalEvaluator",
+    "ProtectedMaterialMultimodalEvaluator",
+]

--- /dev/null
+++ b/azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py
@@ -0,0 +1,132 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+import logging
+from concurrent.futures import as_completed
+from typing import Callable, Dict, List, Union
+
+from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
+from azure.ai.evaluation._common._experimental import experimental
+from azure.ai.evaluation._common.utils import validate_conversation
+from ._hate_unfairness import HateUnfairnessMultimodalEvaluator
+from ._self_harm import SelfHarmMultimodalEvaluator
+from ._sexual import SexualMultimodalEvaluator
+from ._violence import ViolenceMultimodalEvaluator
+
+
+logger = logging.getLogger(__name__)
+
+
+@experimental
+class ContentSafetyMultimodalEvaluator:
+    """
+    Initialize a content safety multimodal evaluator configured to evaluate content safety metrics
+    in a multimodal scenario.
+
+    :param credential: The credential required for connecting to the Azure AI project.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param azure_ai_project: The scope of the Azure AI project, containing the subscription ID,
+        resource group, and project name.
+    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+    :param kwargs: Additional arguments to pass to the evaluator.
+    :type kwargs: Any
+
+    :return: A function that evaluates multimodal chat messages and generates content safety metrics.
+
+    **Usage Example**
+
+    .. code-block:: python
+
+        azure_ai_project = {
+            "subscription_id": "<subscription_id>",
+            "resource_group_name": "<resource_group_name>",
+            "project_name": "<project_name>",
+        }
+        eval_fn = ContentSafetyMultimodalEvaluator(azure_ai_project)
+        result = eval_fn(conversation=
+            {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": "What's in this image?"
+                            },
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": "<image url or base64 encoded image>"
+                                }
+                            }
+                        ]
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "This picture shows an astronaut standing in the desert."
+                    }
+                ]
+            }
+        )
+
+    **Output Format**
+
+    .. code-block:: json
+
+        {
+            "violence": "Medium",
+            "violence_score": 5.0,
+            "violence_reason": "Some reason",
+            "sexual": "Medium",
+            "sexual_score": 5.0,
+            "sexual_reason": "Some reason",
+            "self_harm": "Medium",
+            "self_harm_score": 5.0,
+            "self_harm_reason": "Some reason",
+            "hate_unfairness": "Medium",
+            "hate_unfairness_score": 5.0,
+            "hate_unfairness_reason": "Some reason"
+        }
+
+    """
+
+    id = "content_safety_multimodal"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    def __init__(self, credential, azure_ai_project, **kwargs):
+        self._parallel = kwargs.pop("_parallel", False)
+        self._evaluators: List[Callable[..., Dict[str, Union[str, float]]]] = [
+            ViolenceMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+            SexualMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+            SelfHarmMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+            HateUnfairnessMultimodalEvaluator(credential=credential, azure_ai_project=azure_ai_project),
+        ]
+
+    def __call__(self, *, conversation, **kwargs):
+        """
+        Evaluates content-safety metrics for list of messages.
+
+        :keyword conversation: The conversation contains list of messages to be evaluated.
+            Each message should have "role" and "content" keys. It supports single turn only.
+        :paramtype conversation: ~azure.ai.evaluation.Conversation
+        :return: The evaluation score based on the Content Safety Metrics.
+        :rtype: Dict[str, Union[float, str]]
+        """
+        # validate inputs
+        validate_conversation(conversation)
+        results: Dict[str, Union[str, float]] = {}
+        if self._parallel:
+            with ThreadPoolExecutor() as executor:
+                futures = {
+                    executor.submit(evaluator, conversation=conversation, **kwargs): evaluator
+                    for evaluator in self._evaluators
+                }
+
+                for future in as_completed(futures):
+                    results.update(future.result())
+        else:
+            for evaluator in self._evaluators:
+                result = evaluator(conversation=conversation, **kwargs)
+                results.update(result)
+
+        return results
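
Note that the docstring example above constructs the evaluator with only azure_ai_project, while __init__ takes credential first. A call matching the constructor signature would look roughly like this (a sketch: DefaultAzureCredential is an assumed credential source, and the top-level import assumes the exports added to azure/ai/evaluation/__init__.py in this release):

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ContentSafetyMultimodalEvaluator

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }

    # __init__(self, credential, azure_ai_project, **kwargs): credential comes first.
    eval_fn = ContentSafetyMultimodalEvaluator(
        credential=DefaultAzureCredential(),
        azure_ai_project=azure_ai_project,
    )
    conversation = {
        "messages": [
            {"role": "user", "content": [
                {"type": "text", "text": "What's in this image?"},
                {"type": "image_url", "image_url": {"url": "<image url or base64 encoded image>"}},
            ]},
            {"role": "assistant", "content": "This picture shows an astronaut standing in the desert."},
        ]
    }
    result = eval_fn(conversation=conversation)  # single turn only, per the docstring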

--- /dev/null
+++ b/azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py
@@ -0,0 +1,55 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from abc import ABC
+from typing import Union
+from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service_multimodal
+from azure.ai.evaluation._common.constants import EvaluationMetrics, _InternalEvaluationMetrics
+from azure.ai.evaluation._common.utils import validate_conversation
+from azure.core.credentials import TokenCredential
+
+
+class ContentSafetyMultimodalEvaluatorBase(ABC):
+    """
+    Initialize a evaluator for a specified Evaluation Metric. Base class that is not
+    meant to be instantiated by users.
+
+    :param metric: The metric to be evaluated.
+    :type metric: ~azure.ai.evaluation._evaluators._content_safety.flow.constants.EvaluationMetrics
+    :param credential: The credential for connecting to Azure AI project. Required
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param azure_ai_project: The scope of the Azure AI project.
+        It contains subscription id, resource group, and project name.
+    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+    """
+
+    def __init__(
+        self,
+        metric: Union[EvaluationMetrics, _InternalEvaluationMetrics],
+        credential: TokenCredential,
+        azure_ai_project,
+    ):
+        self._metric = metric
+        self._azure_ai_project = azure_ai_project
+        self._credential = credential
+
+    async def __call__(self, *, conversation, **kwargs):
+        """
+        Evaluates content according to this evaluator's metric.
+        :keyword conversation: The conversation contains list of messages to be evaluated.
+            Each message should have "role" and "content" keys.
+        :paramtype conversation: ~azure.ai.evaluation.Conversation
+        :return: The evaluation score computation based on the Content Safety metric (self.metric).
+        :rtype: Dict[str, Union[float, str]]
+        """
+        # validate inputs
+        validate_conversation(conversation)
+        messages = conversation["messages"]
+        # Run score computation based on supplied metric.
+        result = await evaluate_with_rai_service_multimodal(
+            messages=messages,
+            metric_name=self._metric,
+            project_scope=self._azure_ai_project,
+            credential=self._credential,
+        )
+        return result

--- /dev/null
+++ b/azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py
@@ -0,0 +1,100 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from promptflow._utils.async_utils import async_run_allowing_running_loop
+from azure.ai.evaluation._common._experimental import experimental
+from azure.ai.evaluation._common.constants import EvaluationMetrics
+from ._content_safety_multimodal_base import ContentSafetyMultimodalEvaluatorBase
+
+
+@experimental
+class HateUnfairnessMultimodalEvaluator:
+    """
+    Initialize a hate-unfairness multimodal evaluator for hate unfairness score.
+
+    :param credential: The credential for connecting to the Azure AI project. This is required.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param azure_ai_project: The scope of the Azure AI project,
+        which includes subscription ID, resource group name, and project name.
+    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+
+    **Usage Example**
+
+    .. code-block:: python
+
+        azure_ai_project = {
+            "subscription_id": "<subscription_id>",
+            "resource_group_name": "<resource_group_name>",
+            "project_name": "<project_name>",
+        }
+        eval_fn = HateUnfairnessMultimodalEvaluator(azure_ai_project)
+        result = eval_fn(conversation=
+            {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": "What's in this image?"
+                            },
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": "<image url or base64 encoded image>"
+                                }
+                            }
+                        ]
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "This picture shows an astronaut standing in the desert."
+                    }
+                ]
+            }
+        )
+
+    **Output Format**
+
+    .. code-block:: json
+
+        {
+            "hate_unfairness": "High",
+            "hate_unfairness_score": 6.5,
+            "hate_unfairness_reason": "Some reason"
+        }
+
+    """
+
+    id = "hate_unfairness_multimodal"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    def __init__(self, credential, azure_ai_project):
+        self._async_evaluator = _AsyncHateUnfairnessMultimodalEvaluator(credential, azure_ai_project)
+
+    def __call__(self, *, conversation, **kwargs):
+        """
+        Evaluates hate unfairness content.
+
+        :keyword conversation: The conversation contains list of messages to be evaluated.
+            Each message should have "role" and "content" keys. It supports single turn only.
+        :paramtype conversation: ~azure.ai.evaluation.Conversation
+        :return: The hate unfairness score.
+        :rtype: Dict[str, Union[float, str]]
+        """
+        return async_run_allowing_running_loop(self._async_evaluator, conversation=conversation, **kwargs)
+
+    def _to_async(self):
+        return self._async_evaluator
+
+
+class _AsyncHateUnfairnessMultimodalEvaluator(ContentSafetyMultimodalEvaluatorBase):
+    def __init__(self, credential, azure_ai_project):
+        super().__init__(
+            metric=EvaluationMetrics.HATE_FAIRNESS,
+            credential=credential,
+            azure_ai_project=azure_ai_project,
+        )
+
+    async def __call__(self, *, conversation, **kwargs):
+        return await super().__call__(conversation=conversation, **kwargs)
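
Each public multimodal evaluator in this release follows the same shape: a thin synchronous wrapper delegating to an async twin via promptflow's async_run_allowing_running_loop. A generic, dependency-free illustration of that pattern (plain asyncio.run stands in for the promptflow helper, which additionally copes with an already-running event loop; the class names here are invented for the illustration):

    import asyncio

    class _AsyncScorer:
        async def __call__(self, *, conversation, **kwargs):
            await asyncio.sleep(0)  # stand-in for the awaited RAI service call
            return {"hate_unfairness": "Very low", "hate_unfairness_score": 0.0}

    class Scorer:
        """Sync facade over an async evaluator, mirroring the wrapper above."""

        def __init__(self):
            self._async_evaluator = _AsyncScorer()

        def __call__(self, *, conversation, **kwargs):
            # The SDK calls async_run_allowing_running_loop here; asyncio.run
            # covers the simple case of a purely synchronous caller.
            return asyncio.run(self._async_evaluator(conversation=conversation, **kwargs))

        def _to_async(self):
            return self._async_evaluator

    print(Scorer()(conversation={"messages": []}))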

--- /dev/null
+++ b/azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py
@@ -0,0 +1,124 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from promptflow._utils.async_utils import async_run_allowing_running_loop
+from azure.ai.evaluation._common.constants import EvaluationMetrics
+from azure.ai.evaluation._common.utils import validate_conversation
+from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service_multimodal
+from azure.ai.evaluation._common._experimental import experimental
+
+
+@experimental
+class ProtectedMaterialMultimodalEvaluator:
+    """
+    Initialize a protected materials evaluator to detect whether protected material
+    is present in multimodal messages. The evaluator outputs a Boolean label (`True` or `False`)
+    indicating the presence of protected material, along with AI-generated reasoning.
+
+    :param credential: The credential for connecting to the Azure AI project. This is required.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param azure_ai_project: The scope of the Azure AI project, containing the subscription ID,
+        resource group, and project name.
+    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+
+    :return: A dictionary containing the evaluation result label and reasoning.
+
+    **Usage Example**
+
+    .. code-block:: python
+
+        azure_ai_project = {
+            "subscription_id": "<subscription_id>",
+            "resource_group_name": "<resource_group_name>",
+            "project_name": "<project_name>",
+        }
+        eval_fn = ProtectedMaterialMultimodalEvaluator(azure_ai_project)
+        result = eval_fn(conversation=
+            {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": "What's in this image?"
+                            },
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": "<image url or base64 encoded image>"
+                                }
+                            }
+                        ]
+                    },
+                    {
+                        "role": "assistant",
+                        "content": "This picture shows an astronaut standing in the desert."
+                    }
+                ]
+            }
+        )
+
+    **Output Format**
+
+    .. code-block:: json
+
+        {
+            "protected_material_label": "False",
+            "protected_material_reason": "This query does not contain any protected material."
+        }
+
+    """
+
+    id = "protected_material_multimodal"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    def __init__(
+        self,
+        credential,
+        azure_ai_project,
+    ):
+        self._async_evaluator = _AsyncProtectedMaterialMultimodalEvaluator(credential, azure_ai_project)
+
+    def __call__(self, *, conversation, **kwargs):
+        """
+        Evaluates protected materials content.
+
+        :keyword conversation: The conversation contains list of messages to be evaluated.
+            Each message should have "role" and "content" keys. It supports single turn only.
+        :paramtype conversation: ~azure.ai.evaluation.Conversation
+        :return: A dictionary containing a boolean label and reasoning.
+        :rtype: Dict[str, str]
+        """
+        return async_run_allowing_running_loop(self._async_evaluator, conversation=conversation, **kwargs)
+
+    def _to_async(self):
+        return self._async_evaluator
+
+
+class _AsyncProtectedMaterialMultimodalEvaluator:
+    def __init__(self, credential, azure_ai_project):
+        self._credential = credential
+        self._azure_ai_project = azure_ai_project
+
+    async def __call__(self, *, conversation, **kwargs):
+        """
+        Evaluates content according to this evaluator's metric.
+
+        :keyword conversation: The conversation contains list of messages to be evaluated.
+            Each message should have "role" and "content" keys. It supports single turn only.
+        :paramtype conversation: ~azure.ai.evaluation.Conversation
+        :return: The evaluation score computation based on the Content Safety metric (self.metric).
+        :rtype: Any
+        """
+        # Validate inputs
+        validate_conversation(conversation)
+        messages = conversation["messages"]
+        # Run score computation based on supplied metric.
+        result = await evaluate_with_rai_service_multimodal(
+            messages=messages,
+            metric_name=EvaluationMetrics.PROTECTED_MATERIAL,
+            credential=self._credential,
+            project_scope=self._azure_ai_project,
+        )
+        return result
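
Given the documented output format, downstream handling of a result is simple. One wrinkle worth noting: the class description promises a Boolean label, while the docstring's JSON example renders it as the string "False"; a defensive consumer can accept either form (sketch with a stand-in result dict):

    # Stand-in result matching the documented output format; in practice this
    # comes from ProtectedMaterialMultimodalEvaluator(...)(conversation=...).
    result = {
        "protected_material_label": "False",
        "protected_material_reason": "This query does not contain any protected material.",
    }

    label = result.get("protected_material_label")
    if label is True or label == "True":
        print("Protected material detected:", result["protected_material_reason"])
    else:
        print("No protected material:", result["protected_material_reason"])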