azure-ai-evaluation 1.0.0__py3-none-any.whl → 1.0.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of azure-ai-evaluation might be problematic.
- azure/ai/evaluation/__init__.py +5 -31
- azure/ai/evaluation/_common/constants.py +2 -9
- azure/ai/evaluation/_common/rai_service.py +120 -300
- azure/ai/evaluation/_common/utils.py +23 -381
- azure/ai/evaluation/_constants.py +6 -19
- azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/__init__.py +2 -3
- azure/ai/evaluation/_evaluate/{_batch_run/eval_run_context.py → _batch_run_client/batch_run_context.py} +7 -23
- azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/code_client.py +17 -33
- azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/proxy_client.py +4 -32
- azure/ai/evaluation/_evaluate/_eval_run.py +24 -81
- azure/ai/evaluation/_evaluate/_evaluate.py +239 -393
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +17 -17
- azure/ai/evaluation/_evaluate/_utils.py +28 -82
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +18 -17
- azure/ai/evaluation/_evaluators/{_retrieval → _chat}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_chat/_chat.py +357 -0
- azure/ai/evaluation/_evaluators/{_service_groundedness → _chat/retrieval}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +157 -0
- azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +48 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +88 -78
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +39 -76
- azure/ai/evaluation/_evaluators/_content_safety/__init__.py +4 -0
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +67 -105
- azure/ai/evaluation/_evaluators/{_multimodal/_content_safety_multimodal_base.py → _content_safety/_content_safety_base.py} +34 -24
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +301 -0
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +54 -105
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +52 -99
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +52 -101
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +51 -101
- azure/ai/evaluation/_evaluators/_eci/_eci.py +54 -44
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +19 -34
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +89 -76
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +41 -66
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +16 -14
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +87 -113
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +54 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +27 -20
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +80 -89
- azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +104 -0
- azure/ai/evaluation/_evaluators/_qa/_qa.py +30 -23
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +96 -84
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +47 -78
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +27 -26
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +38 -53
- azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +5 -0
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +105 -91
- azure/ai/evaluation/_exceptions.py +7 -28
- azure/ai/evaluation/_http_utils.py +132 -203
- azure/ai/evaluation/_model_configurations.py +8 -104
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/__init__.py +1 -2
- azure/ai/evaluation/simulator/_adversarial_scenario.py +1 -20
- azure/ai/evaluation/simulator/_adversarial_simulator.py +92 -111
- azure/ai/evaluation/simulator/_constants.py +1 -11
- azure/ai/evaluation/simulator/_conversation/__init__.py +12 -13
- azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -4
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +67 -33
- azure/ai/evaluation/simulator/_helpers/__init__.py +2 -1
- azure/ai/evaluation/{_common → simulator/_helpers}/_experimental.py +9 -24
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +5 -26
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +94 -107
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +22 -70
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +11 -28
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +4 -8
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +24 -68
- azure/ai/evaluation/simulator/_model_tools/models.py +10 -10
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +10 -6
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +5 -6
- azure/ai/evaluation/simulator/_simulator.py +207 -277
- azure/ai/evaluation/simulator/_tracing.py +4 -4
- azure/ai/evaluation/simulator/_utils.py +13 -31
- azure_ai_evaluation-1.0.0b2.dist-info/METADATA +449 -0
- azure_ai_evaluation-1.0.0b2.dist-info/RECORD +99 -0
- {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/WHEEL +1 -1
- azure/ai/evaluation/_common/math.py +0 -89
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +0 -46
- azure/ai/evaluation/_evaluators/_common/__init__.py +0 -13
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +0 -344
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +0 -88
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +0 -133
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +0 -113
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +0 -99
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +0 -20
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +0 -132
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +0 -124
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +0 -100
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +0 -112
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +0 -93
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +0 -148
- azure/ai/evaluation/_vendor/__init__.py +0 -3
- azure/ai/evaluation/_vendor/rouge_score/__init__.py +0 -14
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -328
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -63
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -63
- azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +0 -53
- azure/ai/evaluation/simulator/_data_sources/__init__.py +0 -3
- azure/ai/evaluation/simulator/_data_sources/grounding.json +0 -1150
- azure_ai_evaluation-1.0.0.dist-info/METADATA +0 -595
- azure_ai_evaluation-1.0.0.dist-info/NOTICE.txt +0 -70
- azure_ai_evaluation-1.0.0.dist-info/RECORD +0 -119
- {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_fluency/_fluency.py
@@ -3,102 +3,115 @@
 # ---------------------------------------------------------
 
 import os
-
+import re
 
-
+import numpy as np
+from promptflow._utils.async_utils import async_run_allowing_running_loop
+from promptflow.core import AsyncPrompty
 
-from azure.ai.evaluation.
-from azure.ai.evaluation._model_configurations import Conversation
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 
+from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config
 
-
-
-
+try:
+    from ..._user_agent import USER_AGENT
+except ImportError:
+    USER_AGENT = None
+
+
+class _AsyncFluencyEvaluator:
+    # Constants must be defined within eval's directory to be save/loadable
+    PROMPTY_FILE = "fluency.prompty"
+    LLM_CALL_TIMEOUT = 600
+    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+
+    def __init__(self, model_config: dict):
+        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
+
+        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
+
+        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
+        # https://github.com/encode/httpx/discussions/2959
+        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
+
+        ensure_user_agent_in_aoai_model_config(
+            model_config,
+            prompty_model_config,
+            USER_AGENT,
+        )
+
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
+
+    async def __call__(self, *, query: str, response: str, **kwargs):
+        # Validate input parameters
+        query = str(query or "")
+        response = str(response or "")
+
+        if not (query.strip() and response.strip()):
+            msg = "Both 'query' and 'response' must be non-empty strings."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                error_category=ErrorCategory.MISSING_FIELD,
+                error_blame=ErrorBlame.USER_ERROR,
+                error_target=ErrorTarget.F1_EVALUATOR,
+            )
+
+        # Run the evaluation flow
+        llm_output = await self._flow(query=query, response=response, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
+
+        score = np.nan
+        if llm_output:
+            match = re.search(r"\d", llm_output)
+            if match:
+                score = float(match.group())
 
-
-    structures, and appropriate vocabulary usage, resulting in linguistically correct responses.
+        return {"gpt_fluency": float(score)}
 
-
+
+class FluencyEvaluator:
+    """
+    Initialize a fluency evaluator configured for a specific Azure OpenAI model.
 
     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
        ~azure.ai.evaluation.OpenAIModelConfiguration]
 
-
+    **Usage**
 
-
-            :start-after: [START fluency_evaluator]
-            :end-before: [END fluency_evaluator]
-            :language: python
-            :dedent: 8
-            :caption: Initialize and call a FluencyEvaluator.
+    .. code-block:: python
 
-
+        eval_fn = FluencyEvaluator(model_config)
+        result = eval_fn(
+            query="What is the capital of Japan?",
+            response="The capital of Japan is Tokyo.")
 
-
-    To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
-    however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
-    """
+    **Output format**
 
-
-    _RESULT_KEY = "fluency"
+    .. code-block:: python
 
-
-
+        {
+            "gpt_fluency": 4.0
+        }
+    """
 
-
-
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
-        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+    def __init__(self, model_config: dict):
+        self._async_evaluator = _AsyncFluencyEvaluator(model_config)
 
-
-
-
-        *,
-        response: str,
-    ) -> Dict[str, Union[str, float]]:
-        """Evaluate fluency in given response
+    def __call__(self, *, query: str, response: str, **kwargs):
+        """
+        Evaluate fluency.
 
+        :keyword query: The query to be evaluated.
+        :paramtype query: str
         :keyword response: The response to be evaluated.
        :paramtype response: str
-        :return: The fluency score
-        :rtype: Dict[str, float]
-        """
-
-    @overload
-    def __call__(
-        self,
-        *,
-        conversation: Conversation,
-    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
-        """Evaluate fluency for a conversation
-
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages", and potentially a global context under the key "context". Conversation turns are expected
-            to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
-        :return: The fluency score
-        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
-        """
-
-    @override
-    def __call__(  # pylint: disable=docstring-missing-param
-        self,
-        *args,
-        **kwargs,
-    ):
-        """
-        Evaluate fluency. Accepts either a response for a single evaluation,
-        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
-        the evaluator will aggregate the results of each turn.
-
-        :keyword response: The response to be evaluated. Mutually exclusive with the "conversation" parameter.
-        :paramtype response: Optional[str]
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages". Conversation turns are expected to be dictionaries with keys "content" and "role".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The fluency score.
-        :rtype:
+        :rtype: dict
         """
-        return
+        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+    def _to_async(self):
+        return self._async_evaluator
azure/ai/evaluation/_evaluators/_fluency/fluency.prompty
@@ -3,9 +3,14 @@ name: Fluency
 description: Evaluates fluency score for QA scenario
 model:
   api: chat
+  configuration:
+    type: azure_openai
+    azure_deployment: ${env:AZURE_DEPLOYMENT}
+    api_key: ${env:AZURE_OPENAI_API_KEY}
+    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
   parameters:
     temperature: 0.0
-    max_tokens:
+    max_tokens: 1
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -13,74 +18,44 @@ model:
     type: text
 
 inputs:
+  query:
+    type: string
   response:
     type: string
 
 ---
 system:
-
-## Goal
-### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
-- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
-- **Data**: Your input data include a RESPONSE.
-- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
-
+You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. You should return a single integer value between 1 to 5 representing the evaluation metric. You will include no other text or information.
 user:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-**Response:** My interest in mathematics and problem-solving inspired me to become an engineer, as I enjoy designing solutions that improve people's lives.
-
-**Response:** Environmental conservation is crucial because it protects ecosystems, preserves biodiversity, and ensures natural resources are available for future generations.
-
-## [Fluency: 5] (Exceptional Fluency)
-**Definition:** The response demonstrates an exceptional command of language with sophisticated vocabulary and complex, varied sentence structures. It is coherent, cohesive, and engaging, with precise and nuanced expression. Grammar is flawless, and the text reflects a high level of eloquence and style.
-
-**Examples:**
-**Response:** Globalization exerts a profound influence on cultural diversity by facilitating unprecedented cultural exchange while simultaneously risking the homogenization of distinct cultural identities, which can diminish the richness of global heritage.
-
-**Response:** Technology revolutionizes modern education by providing interactive learning platforms, enabling personalized learning experiences, and connecting students worldwide, thereby transforming how knowledge is acquired and shared.
-
-
-# Data
-RESPONSE: {{response}}
-
-
-# Tasks
-## Please provide your assessment Score for the previous RESPONSE based on the Definitions above. Your output should include the following information:
-- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
-- **Explanation**: a very short explanation of why you think the input Data should get that Score.
-- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
-
-
-## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
-# Output
+Fluency measures the quality of individual sentences in the answer, and whether they are well-written and grammatically correct. Consider the quality of individual sentences when evaluating fluency. Given the question and answer, score the fluency of the answer between one to five stars using the following rating scale:
+One star: the answer completely lacks fluency
+Two stars: the answer mostly lacks fluency
+Three stars: the answer is partially fluent
+Four stars: the answer is mostly fluent
+Five stars: the answer has perfect fluency
+
+This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
+
+question: What did you have for breakfast today?
+answer: Breakfast today, me eating cereal and orange juice very good.
+stars: 1
+
+question: How do you feel when you travel alone?
+answer: Alone travel, nervous, but excited also. I feel adventure and like its time.
+stars: 2
+
+question: When was the last time you went on a family vacation?
+answer: Last family vacation, it took place in last summer. We traveled to a beach destination, very fun.
+stars: 3
+
+question: What is your favorite thing about your job?
+answer: My favorite aspect of my job is the chance to interact with diverse people. I am constantly learning from their experiences and stories.
+stars: 4
+
+question: Can you describe your morning routine?
+answer: Every morning, I wake up at 6 am, drink a glass of water, and do some light stretching. After that, I take a shower and get dressed for work. Then, I have a healthy breakfast, usually consisting of oatmeal and fruits, before leaving the house around 7:30 am.
+stars: 5
+
+question: {{query}}
+answer: {{response}}
+stars:
azure/ai/evaluation/_evaluators/_gleu/_gleu.py
@@ -24,28 +24,30 @@ class _AsyncGleuScoreEvaluator:
 
 class GleuScoreEvaluator:
     """
-
+    Evaluator that computes the BLEU Score between two strings.
 
     The GLEU (Google-BLEU) score evaluator measures the similarity between generated and reference texts by
     evaluating n-gram overlap, considering both precision and recall. This balanced evaluation, designed for
     sentence-level assessment, makes it ideal for detailed analysis of translation quality. GLEU is well-suited for
     use cases such as machine translation, text summarization, and text generation.
 
-
-    the ground truth and a value of 0 indicates no overlap.
+    **Usage**
 
-    ..
+    .. code-block:: python
 
-
-
-
-
-
-
-
+        eval_fn = GleuScoreEvaluator()
+        result = eval_fn(
+            response="Tokyo is the capital of Japan.",
+            ground_truth="The capital of Japan is Tokyo.")
+
+    **Output format**
+
+    .. code-block:: python
 
-
-
+        {
+            "gleu_score": 0.41
+        }
+    """
 
     def __init__(self):
         self._async_evaluator = _AsyncGleuScoreEvaluator()
@@ -59,7 +61,7 @@ class GleuScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The GLEU score.
-        :rtype:
+        :rtype: dict
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py
@@ -1,144 +1,118 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
+
 import os
-
+import re
 
-
+import numpy as np
+from promptflow._utils.async_utils import async_run_allowing_running_loop
 from promptflow.core import AsyncPrompty
 
-from azure.ai.evaluation.
-
-from ..._common.utils import
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+
+from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config
 
 try:
     from ..._user_agent import USER_AGENT
 except ImportError:
-    USER_AGENT =
+    USER_AGENT = None
 
 
-class
-
-
-
+class _AsyncGroundednessEvaluator:
+    # Constants must be defined within eval's directory to be save/loadable
+    PROMPTY_FILE = "groundedness.prompty"
+    LLM_CALL_TIMEOUT = 600
+    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
 
-
-
-    factually correct, they'll be considered ungrounded if they can't be verified against the provided sources
-    (such as your input source or your database). Use the groundedness metric when you need to verify that
-    AI-generated responses align with and are validated by the provided context.
+    def __init__(self, model_config: dict):
+        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
 
-
+        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
 
-
-
-
+        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
+        # https://github.com/encode/httpx/discussions/2959
+        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
 
-
+        ensure_user_agent_in_aoai_model_config(
+            model_config,
+            prompty_model_config,
+            USER_AGENT,
+        )
+
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, "groundedness.prompty")
+        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
+
+    async def __call__(self, *, response: str, context: str, **kwargs):
+        # Validate input parameters
+        response = str(response or "")
+        context = str(context or "")
+
+        if not response.strip() or not context.strip():
+            msg = "Both 'response' and 'context' must be non-empty strings."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                error_category=ErrorCategory.MISSING_FIELD,
+                error_blame=ErrorBlame.USER_ERROR,
+                error_target=ErrorTarget.F1_EVALUATOR,
+            )
 
-
-
-            :end-before: [END groundedness_evaluator]
-            :language: python
-            :dedent: 8
-            :caption: Initialize and call a GroundednessEvaluator.
+        # Run the evaluation flow
+        llm_output = await self._flow(response=response, context=context, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
 
-
+        score = np.nan
+        if llm_output:
+            match = re.search(r"\d", llm_output)
+            if match:
+                score = float(match.group())
 
-
-
-
+        return {"gpt_groundedness": float(score)}
+
+
+class GroundednessEvaluator:
     """
+    Initialize a groundedness evaluator configured for a specific Azure OpenAI model.
+
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: Union[~azure.ai.evalation.AzureOpenAIModelConfiguration,
+        ~azure.ai.evalation.OpenAIModelConfiguration]
 
-
-    _PROMPTY_FILE_WITH_QUERY = "groundedness_with_query.prompty"
-    _RESULT_KEY = "groundedness"
-    _OPTIONAL_PARAMS = ["query"]
+    **Usage**
 
-
-    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+    .. code-block:: python
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        eval_fn = GroundednessEvaluator(model_config)
+        result = eval_fn(
+            response="The capital of Japan is Tokyo.",
+            context="Tokyo is Japan's capital, known for its blend of traditional culture \
+                and technological advancements.")
+
+    **Output format**
+
+    .. code-block:: python
+
+        {
+            "gpt_groundedness": 5
+        }
+    """
+
+    def __init__(self, model_config: dict):
+        self._async_evaluator = _AsyncGroundednessEvaluator(model_config)
+
+    def __call__(self, *, response: str, context: str, **kwargs):
+        """
+        Evaluate groundedness of the response in the context.
 
         :keyword response: The response to be evaluated.
         :paramtype response: str
-        :keyword context: The context
+        :keyword context: The context in which the response is evaluated.
         :paramtype context: str
-        :keyword query: The query to be evaluated. Optional parameter for use with the `response`
-            and `context` parameters. If provided, a different prompt template will be used for evaluation.
-        :paramtype query: Optional[str]
         :return: The groundedness score.
-        :rtype:
+        :rtype: dict
         """
+        return async_run_allowing_running_loop(self._async_evaluator, response=response, context=context, **kwargs)
 
-
-
-        self,
-        *,
-        conversation: Conversation,
-    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
-        """Evaluate groundedness for a conversation
-
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages", and potentially a global context under the key "context". Conversation turns are expected
-            to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
-        :return: The groundedness score.
-        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
-        """
-
-    @override
-    def __call__(  # pylint: disable=docstring-missing-param
-        self,
-        *args,
-        **kwargs,
-    ):
-        """Evaluate groundedness. Accepts either a query, response, and context for a single evaluation,
-        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
-        the evaluator will aggregate the results of each turn.
-
-        :keyword query: The query to be evaluated. Mutually exclusive with `conversation`. Optional parameter for use
-            with the `response` and `context` parameters. If provided, a different prompt template will be used for
-            evaluation.
-        :paramtype query: Optional[str]
-        :keyword response: The response to be evaluated. Mutually exclusive with the `conversation` parameter.
-        :paramtype response: Optional[str]
-        :keyword context: The context to be evaluated. Mutually exclusive with the `conversation` parameter.
-        :paramtype context: Optional[str]
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages", and potentially a global context under the key "context". Conversation turns are expected
-            to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
-        :return: The relevance score.
-        :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
-        """
-
-        if kwargs.get("query", None):
-            current_dir = os.path.dirname(__file__)
-            prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_WITH_QUERY)
-            self._prompty_file = prompty_path
-            prompty_model_config = construct_prompty_model_config(
-                validate_model_config(self._model_config),
-                self._DEFAULT_OPEN_API_VERSION,
-                USER_AGENT,
-            )
-            self._flow = AsyncPrompty.load(source=self._prompty_file, model=prompty_model_config)
-
-            return super().__call__(*args, **kwargs)
+    def _to_async(self):
+        return self._async_evaluator