azure-ai-evaluation 0.0.0b0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. azure/ai/evaluation/__init__.py +82 -0
  2. azure/ai/evaluation/_common/__init__.py +16 -0
  3. azure/ai/evaluation/_common/_experimental.py +172 -0
  4. azure/ai/evaluation/_common/constants.py +72 -0
  5. azure/ai/evaluation/_common/math.py +89 -0
  6. azure/ai/evaluation/_common/rai_service.py +632 -0
  7. azure/ai/evaluation/_common/utils.py +445 -0
  8. azure/ai/evaluation/_constants.py +72 -0
  9. azure/ai/evaluation/_evaluate/__init__.py +3 -0
  10. azure/ai/evaluation/_evaluate/_batch_run/__init__.py +9 -0
  11. azure/ai/evaluation/_evaluate/_batch_run/code_client.py +188 -0
  12. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +89 -0
  13. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +99 -0
  14. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
  15. azure/ai/evaluation/_evaluate/_eval_run.py +571 -0
  16. azure/ai/evaluation/_evaluate/_evaluate.py +850 -0
  17. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +179 -0
  18. azure/ai/evaluation/_evaluate/_utils.py +298 -0
  19. azure/ai/evaluation/_evaluators/__init__.py +3 -0
  20. azure/ai/evaluation/_evaluators/_bleu/__init__.py +9 -0
  21. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +72 -0
  22. azure/ai/evaluation/_evaluators/_coherence/__init__.py +7 -0
  23. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +107 -0
  24. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +99 -0
  25. azure/ai/evaluation/_evaluators/_common/__init__.py +13 -0
  26. azure/ai/evaluation/_evaluators/_common/_base_eval.py +344 -0
  27. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +88 -0
  28. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +133 -0
  29. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +17 -0
  30. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -0
  31. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +129 -0
  32. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -0
  33. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +125 -0
  34. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +126 -0
  35. azure/ai/evaluation/_evaluators/_eci/__init__.py +0 -0
  36. azure/ai/evaluation/_evaluators/_eci/_eci.py +89 -0
  37. azure/ai/evaluation/_evaluators/_f1_score/__init__.py +9 -0
  38. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +157 -0
  39. azure/ai/evaluation/_evaluators/_fluency/__init__.py +9 -0
  40. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +104 -0
  41. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +86 -0
  42. azure/ai/evaluation/_evaluators/_gleu/__init__.py +9 -0
  43. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +69 -0
  44. azure/ai/evaluation/_evaluators/_groundedness/__init__.py +9 -0
  45. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +144 -0
  46. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
  47. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
  48. azure/ai/evaluation/_evaluators/_meteor/__init__.py +9 -0
  49. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +90 -0
  50. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
  51. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
  52. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
  53. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
  54. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
  55. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
  56. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
  57. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
  58. azure/ai/evaluation/_evaluators/_protected_material/__init__.py +5 -0
  59. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +113 -0
  60. azure/ai/evaluation/_evaluators/_qa/__init__.py +9 -0
  61. azure/ai/evaluation/_evaluators/_qa/_qa.py +93 -0
  62. azure/ai/evaluation/_evaluators/_relevance/__init__.py +9 -0
  63. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +114 -0
  64. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +100 -0
  65. azure/ai/evaluation/_evaluators/_retrieval/__init__.py +9 -0
  66. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +112 -0
  67. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
  68. azure/ai/evaluation/_evaluators/_rouge/__init__.py +10 -0
  69. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +98 -0
  70. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  71. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
  72. azure/ai/evaluation/_evaluators/_similarity/__init__.py +9 -0
  73. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +140 -0
  74. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +66 -0
  75. azure/ai/evaluation/_evaluators/_xpia/__init__.py +5 -0
  76. azure/ai/evaluation/_evaluators/_xpia/xpia.py +125 -0
  77. azure/ai/evaluation/_exceptions.py +128 -0
  78. azure/ai/evaluation/_http_utils.py +466 -0
  79. azure/ai/evaluation/_model_configurations.py +123 -0
  80. azure/ai/evaluation/_user_agent.py +6 -0
  81. azure/ai/evaluation/_vendor/__init__.py +3 -0
  82. azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
  83. azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +328 -0
  84. azure/ai/evaluation/_vendor/rouge_score/scoring.py +63 -0
  85. azure/ai/evaluation/_vendor/rouge_score/tokenize.py +63 -0
  86. azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
  87. azure/ai/evaluation/_version.py +5 -0
  88. azure/ai/evaluation/py.typed +0 -0
  89. azure/ai/evaluation/simulator/__init__.py +16 -0
  90. azure/ai/evaluation/simulator/_adversarial_scenario.py +46 -0
  91. azure/ai/evaluation/simulator/_adversarial_simulator.py +471 -0
  92. azure/ai/evaluation/simulator/_constants.py +27 -0
  93. azure/ai/evaluation/simulator/_conversation/__init__.py +316 -0
  94. azure/ai/evaluation/simulator/_conversation/_conversation.py +178 -0
  95. azure/ai/evaluation/simulator/_conversation/constants.py +30 -0
  96. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  97. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  98. azure/ai/evaluation/simulator/_direct_attack_simulator.py +218 -0
  99. azure/ai/evaluation/simulator/_helpers/__init__.py +4 -0
  100. azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +17 -0
  101. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +96 -0
  102. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +220 -0
  103. azure/ai/evaluation/simulator/_model_tools/__init__.py +23 -0
  104. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +195 -0
  105. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +244 -0
  106. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +168 -0
  107. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +201 -0
  108. azure/ai/evaluation/simulator/_model_tools/models.py +614 -0
  109. azure/ai/evaluation/simulator/_prompty/__init__.py +0 -0
  110. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +65 -0
  111. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +37 -0
  112. azure/ai/evaluation/simulator/_simulator.py +716 -0
  113. azure/ai/evaluation/simulator/_tracing.py +89 -0
  114. azure/ai/evaluation/simulator/_utils.py +132 -0
  115. azure_ai_evaluation-1.0.0.dist-info/METADATA +595 -0
  116. azure_ai_evaluation-1.0.0.dist-info/NOTICE.txt +70 -0
  117. azure_ai_evaluation-1.0.0.dist-info/RECORD +119 -0
  118. {azure_ai_evaluation-0.0.0b0.dist-info → azure_ai_evaluation-1.0.0.dist-info}/WHEEL +1 -1
  119. azure_ai_evaluation-1.0.0.dist-info/top_level.txt +1 -0
  120. azure_ai_evaluation-0.0.0b0.dist-info/METADATA +0 -7
  121. azure_ai_evaluation-0.0.0b0.dist-info/RECORD +0 -4
  122. azure_ai_evaluation-0.0.0b0.dist-info/top_level.txt +0 -1
azure/ai/evaluation/_evaluators/_eci/_eci.py
@@ -0,0 +1,89 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from typing_extensions import overload, override
+
+ from azure.ai.evaluation._common._experimental import experimental
+ from azure.ai.evaluation._common.constants import _InternalEvaluationMetrics
+ from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
+ from azure.ai.evaluation._model_configurations import Conversation
+
+
+ @experimental
+ class ECIEvaluator(RaiServiceEvaluatorBase):
+     """
+     Initialize an ECI evaluator to evaluate ECI based on the following guidelines:
+     it detects whether ECI is present without a disclaimer in the AI system's response.
+     After the response is annotated by GPT using the binary annotation prompt
+     (which checks for a disclaimer or authoritative sources),
+     we then check that, if a disclaimer was present, its wording is exactly
+     "AI-generated content may be incorrect. If you are seeking ECI-related information, please go to Bing Search."
+     Outputs True or False with AI-generated reasoning.
+
+     :param credential: The credential for connecting to the Azure AI project. Required.
+     :type credential: ~azure.core.credentials.TokenCredential
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains the subscription ID, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :return: Whether or not ECI was found in the response without a disclaimer, with AI-generated reasoning.
+     :rtype: Dict[str, str]
+
+     **Usage**
+
+     .. code-block:: python
+
+         azure_ai_project = {
+             "subscription_id": "<subscription_id>",
+             "resource_group_name": "<resource_group_name>",
+             "project_name": "<project_name>",
+         }
+         eval_fn = ECIEvaluator(credential, azure_ai_project)
+         result = eval_fn(query="What is the capital of France?", response="Paris.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "eci_label": "False",
+             "eci_reason": "Some reason."
+         }
+     """
+
+     id = "eci"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     @override
+     def __init__(
+         self,
+         credential,
+         azure_ai_project,
+     ):
+         super().__init__(
+             eval_metric=_InternalEvaluationMetrics.ECI,
+             azure_ai_project=azure_ai_project,
+             credential=credential,
+         )
+
+     @overload
+     def __call__(
+         self,
+         *,
+         query: str,
+         response: str,
+     ): ...
+
+     @overload
+     def __call__(
+         self,
+         *,
+         conversation: Conversation,
+     ): ...
+
+     @override
+     def __call__(  # pylint: disable=docstring-missing-param
+         self,
+         *args,
+         **kwargs,
+     ):
+         return super().__call__(*args, **kwargs)
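
The docstring above only demonstrates the single-turn query/response overload. A hedged sketch of the conversation overload follows; the evaluator lives in an internal, experimental module, DefaultAzureCredential is assumed to be an acceptable TokenCredential, and the project values are placeholders.

    # Sketch of the conversation-mode call defined by the second __call__ overload above.
    from azure.identity import DefaultAzureCredential  # assumed credential type

    from azure.ai.evaluation._evaluators._eci._eci import ECIEvaluator  # internal module shown above

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }

    eval_fn = ECIEvaluator(DefaultAzureCredential(), azure_ai_project)

    conversation = {
        "messages": [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "Paris."},
        ]
    }
    print(eval_fn(conversation=conversation))  # e.g. {"eci_label": "False", "eci_reason": "..."}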
azure/ai/evaluation/_evaluators/_f1_score/__init__.py
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._f1_score import F1ScoreEvaluator
+
+ __all__ = [
+     "F1ScoreEvaluator",
+ ]
azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py
@@ -0,0 +1,157 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from collections import Counter
+ from typing import List
+
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+
+ from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+
+
+ class _AsyncF1ScoreEvaluator:
+     def __init__(self):
+         pass
+
+     async def __call__(self, *, response: str, ground_truth: str, **kwargs):
+         """
+         Evaluate F1 score.
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword ground_truth: The ground truth to be evaluated.
+         :paramtype ground_truth: str
+         :return: The F1 score.
+         :rtype: Dict[str, float]
+         """
+         # Validate inputs
+         if not (response and response.strip() and response != "None") or not (
+             ground_truth and ground_truth.strip() and ground_truth != "None"
+         ):
+             msg = "Both 'response' and 'ground_truth' must be non-empty strings."
+             raise EvaluationException(
+                 message=msg,
+                 internal_message=msg,
+                 error_category=ErrorCategory.MISSING_FIELD,
+                 error_blame=ErrorBlame.USER_ERROR,
+                 error_target=ErrorTarget.F1_EVALUATOR,
+             )
+
+         # Run f1 score computation.
+         f1_result = self._compute_f1_score(response=response, ground_truth=ground_truth)
+
+         return {"f1_score": f1_result}
+
+     @classmethod
+     def _compute_f1_score(cls, response: str, ground_truth: str) -> float:
+         import re
+         import string
+
+         class QASplitTokenizer:
+             """Quality assurance tokenizer that splits text on whitespace."""
+
+             def __call__(self, line) -> List[str]:
+                 """Tokenizes an input line using split() on whitespace
+
+                 :param line: The input segment to be tokenized
+                 :type line: str
+                 :return: The tokenized segment
+                 :rtype: List[str]
+                 """
+
+                 return line.split()
+
+         def normalize_text(text: str) -> str:
+             """Lower text and remove punctuation, articles and extra whitespace.
+
+             :param text: The text to be normalized
+             :type text: str
+             :return: The normalized text
+             :rtype: str
+             """
+
+             def remove_articles(text):
+                 return re.sub(r"\b(a|an|the)\b", " ", text)
+
+             def white_space_fix(text):
+                 return " ".join(text.split())
+
+             def remove_punctuation(text):
+                 exclude = set(string.punctuation)
+                 return "".join(ch for ch in text if ch not in exclude)
+
+             def lower(text):
+                 return text.lower()
+
+             return white_space_fix(remove_articles(remove_punctuation(lower(text))))
+
+         tokenizer = QASplitTokenizer()
+         prediction_tokens = tokenizer(normalize_text(response))
+         reference_tokens = tokenizer(normalize_text(ground_truth))
+
+         common_tokens = Counter(prediction_tokens) & Counter(reference_tokens)
+         num_common_tokens = sum(common_tokens.values())
+
+         if num_common_tokens == 0:
+             f1 = 0.0
+         else:
+             precision = 1.0 * num_common_tokens / len(prediction_tokens)
+             recall = 1.0 * num_common_tokens / len(reference_tokens)
+
+             f1 = (2.0 * precision * recall) / (precision + recall)
+
+         return f1
+
+
+ class F1ScoreEvaluator:
+     """
+     Calculates the F1 score for a given response and ground truth or a multi-turn conversation.
+
+     F1 scores range from 0 to 1, with 1 being the best possible score.
+
+     The F1 score is computed from the words shared between the model generation and the ground truth,
+     comparing the individual words in the generated response against those in the ground truth answer:
+     precision is the ratio of the number of shared words to the total number of words in the generation,
+     and recall is the ratio of the number of shared words to the total number of words in the ground truth.
+
+     Use the F1 score when you want a single comprehensive metric that combines both recall and precision in your
+     model's responses. It provides a balanced evaluation of your model's performance in terms of capturing accurate
+     information in the response.
+
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START f1_score_evaluator]
+             :end-before: [END f1_score_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call an F1ScoreEvaluator.
+     """
+
+     id = "azureml://registries/azureml/models/F1Score-Evaluator/versions/3"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     def __init__(self):
+         self._async_evaluator = _AsyncF1ScoreEvaluator()
+
+     def __call__(self, *, response: str, ground_truth: str, **kwargs):
+         """
+         Evaluate F1 score.
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword ground_truth: The ground truth to be evaluated.
+         :paramtype ground_truth: str
+         :return: The F1 score.
+         :rtype: Dict[str, float]
+         """
+
+         return async_run_allowing_running_loop(
+             self._async_evaluator, response=response, ground_truth=ground_truth, **kwargs
+         )
+
+     def _to_async(self):
+         return self._async_evaluator
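
To make the precision/recall description in the F1ScoreEvaluator docstring concrete, here is a small worked example that mirrors `_compute_f1_score` above; the sentences are illustrative, and F1ScoreEvaluator is assumed to be re-exported by `azure/ai/evaluation/__init__.py`.

    # Worked example of the shared-word F1 described above (illustrative values).
    # Normalization lowercases, strips punctuation, and drops articles (a/an/the).
    response = "The capital of France is Paris"      # -> [capital, of, france, is, paris]
    ground_truth = "Paris is the capital of France"  # -> [paris, is, capital, of, france]

    shared = 5                                       # tokens common to both, counted with multiplicity
    precision = shared / 5                           # shared words / words in the generation
    recall = shared / 5                              # shared words / words in the ground truth
    f1 = 2 * precision * recall / (precision + recall)  # = 1.0

    # The evaluator in the module above returns the same kind of result:
    from azure.ai.evaluation import F1ScoreEvaluator  # assumed top-level re-export

    f1_eval = F1ScoreEvaluator()
    print(f1_eval(response=response, ground_truth=ground_truth))  # {'f1_score': 1.0}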
azure/ai/evaluation/_evaluators/_fluency/__init__.py
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._fluency import FluencyEvaluator
+
+ __all__ = [
+     "FluencyEvaluator",
+ ]
azure/ai/evaluation/_evaluators/_fluency/_fluency.py
@@ -0,0 +1,104 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ import os
+ from typing import Dict, List, Union
+
+ from typing_extensions import overload, override
+
+ from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
+ from azure.ai.evaluation._model_configurations import Conversation
+
+
+ class FluencyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
+     """
+     Evaluates the fluency of a given response or a multi-turn conversation, including reasoning.
+
+     The fluency measure assesses the extent to which the generated text conforms to grammatical rules, syntactic
+     structures, and appropriate vocabulary usage, resulting in linguistically correct responses.
+
+     Fluency scores range from 1 to 5, with 1 being the least fluent and 5 being the most fluent.
+
+     :param model_config: Configuration for the Azure OpenAI model.
+     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+         ~azure.ai.evaluation.OpenAIModelConfiguration]
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START fluency_evaluator]
+             :end-before: [END fluency_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a FluencyEvaluator.
+
+     .. note::
+
+         To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+         To maintain backwards compatibility, the old key with the `gpt_` prefix is still present in the output;
+         however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
+     """
+
+     _PROMPTY_FILE = "fluency.prompty"
+     _RESULT_KEY = "fluency"
+
+     id = "azureml://registries/azureml/models/Fluency-Evaluator/versions/4"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     @override
+     def __init__(self, model_config):
+         current_dir = os.path.dirname(__file__)
+         prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+         super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+
+     @overload
+     def __call__(
+         self,
+         *,
+         response: str,
+     ) -> Dict[str, Union[str, float]]:
+         """Evaluate fluency in a given response
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The fluency score
+         :rtype: Dict[str, float]
+         """
+
+     @overload
+     def __call__(
+         self,
+         *,
+         conversation: Conversation,
+     ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+         """Evaluate fluency for a conversation
+
+         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+             key "messages", and potentially a global context under the key "context". Conversation turns are expected
+             to be dictionaries with keys "content", "role", and possibly "context".
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The fluency score
+         :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+         """
+
+     @override
+     def __call__(  # pylint: disable=docstring-missing-param
+         self,
+         *args,
+         **kwargs,
+     ):
+         """
+         Evaluate fluency. Accepts either a response for a single evaluation,
+         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+         the evaluator will aggregate the results of each turn.
+
+         :keyword response: The response to be evaluated. Mutually exclusive with the "conversation" parameter.
+         :paramtype response: Optional[str]
+         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+             key "messages". Conversation turns are expected to be dictionaries with keys "content" and "role".
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The fluency score.
+         :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
+         """
+         return super().__call__(*args, **kwargs)
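
A minimal usage sketch for FluencyEvaluator follows, assuming the `azure_endpoint`/`azure_deployment`/`api_key` fields of ~azure.ai.evaluation.AzureOpenAIModelConfiguration and the top-level re-export from `azure/ai/evaluation/__init__.py`; all values are placeholders.

    # Hedged sketch: single-response and conversation-mode calls.
    from azure.ai.evaluation import FluencyEvaluator  # assumed top-level re-export

    model_config = {
        "azure_endpoint": "https://<resource>.openai.azure.com",  # placeholder
        "azure_deployment": "<deployment-name>",                  # placeholder
        "api_key": "<api-key>",                                   # placeholder
    }

    fluency = FluencyEvaluator(model_config)

    # Single response: returns the "fluency" key (plus the legacy gpt_-prefixed alias noted above).
    print(fluency(response="The capital of France is Paris."))

    # Conversation mode: per-turn scores are aggregated, as described in __call__.
    conversation = {
        "messages": [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "The capital of France is Paris."},
        ]
    }
    print(fluency(conversation=conversation))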
azure/ai/evaluation/_evaluators/_fluency/fluency.prompty
@@ -0,0 +1,86 @@
+ ---
+ name: Fluency
+ description: Evaluates fluency score for QA scenario
+ model:
+   api: chat
+   parameters:
+     temperature: 0.0
+     max_tokens: 800
+     top_p: 1.0
+     presence_penalty: 0
+     frequency_penalty: 0
+     response_format:
+       type: text
+
+ inputs:
+   response:
+     type: string
+
+ ---
+ system:
+ # Instruction
+ ## Goal
+ ### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+ - **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+ - **Data**: Your input data include a RESPONSE.
+ - **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+ user:
+ # Definition
+ **Fluency** refers to the effectiveness and clarity of written communication, focusing on grammatical accuracy, vocabulary range, sentence complexity, coherence, and overall readability. It assesses how smoothly ideas are conveyed and how easily the text can be understood by the reader.
+
+ # Ratings
+ ## [Fluency: 1] (Emergent Fluency)
+ **Definition:** The response shows minimal command of the language. It contains pervasive grammatical errors, extremely limited vocabulary, and fragmented or incoherent sentences. The message is largely incomprehensible, making understanding very difficult.
+
+ **Examples:**
+ **Response:** Free time I. Go park. Not fun. Alone.
+
+ **Response:** Like food pizza. Good cheese eat.
+
+ ## [Fluency: 2] (Basic Fluency)
+ **Definition:** The response communicates simple ideas but has frequent grammatical errors and limited vocabulary. Sentences are short and may be improperly constructed, leading to partial understanding. Repetition and awkward phrasing are common.
+
+ **Examples:**
+ **Response:** I like play soccer. I watch movie. It fun.
+
+ **Response:** My town small. Many people. We have market.
+
+ ## [Fluency: 3] (Competent Fluency)
+ **Definition:** The response clearly conveys ideas with occasional grammatical errors. Vocabulary is adequate but not extensive. Sentences are generally correct but may lack complexity and variety. The text is coherent, and the message is easily understood with minimal effort.
+
+ **Examples:**
+ **Response:** I'm planning to visit friends and maybe see a movie together.
+
+ **Response:** I try to eat healthy food and exercise regularly by jogging.
+
+ ## [Fluency: 4] (Proficient Fluency)
+ **Definition:** The response is well-articulated with good control of grammar and a varied vocabulary. Sentences are complex and well-structured, demonstrating coherence and cohesion. Minor errors may occur but do not affect overall understanding. The text flows smoothly, and ideas are connected logically.
+
+ **Examples:**
+ **Response:** My interest in mathematics and problem-solving inspired me to become an engineer, as I enjoy designing solutions that improve people's lives.
+
+ **Response:** Environmental conservation is crucial because it protects ecosystems, preserves biodiversity, and ensures natural resources are available for future generations.
+
+ ## [Fluency: 5] (Exceptional Fluency)
+ **Definition:** The response demonstrates an exceptional command of language with sophisticated vocabulary and complex, varied sentence structures. It is coherent, cohesive, and engaging, with precise and nuanced expression. Grammar is flawless, and the text reflects a high level of eloquence and style.
+
+ **Examples:**
+ **Response:** Globalization exerts a profound influence on cultural diversity by facilitating unprecedented cultural exchange while simultaneously risking the homogenization of distinct cultural identities, which can diminish the richness of global heritage.
+
+ **Response:** Technology revolutionizes modern education by providing interactive learning platforms, enabling personalized learning experiences, and connecting students worldwide, thereby transforming how knowledge is acquired and shared.
+
+
+ # Data
+ RESPONSE: {{response}}
+
+
+ # Tasks
+ ## Please provide your assessment Score for the previous RESPONSE based on the Definitions above. Your output should include the following information:
+ - **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+ - **Explanation**: a very short explanation of why you think the input Data should get that Score.
+ - **Score**: based on your previous analysis, provide your Score. The Score you give MUST be an integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+ ## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+ # Output
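
The prompty asks the model to return its chain of thought, explanation, and score between <S0>, <S1>, and <S2> tags. Parsing of that output is handled by the package's prompty plumbing (PromptyEvaluatorBase); the snippet below is only an illustrative sketch of how such tagged output could be pulled apart, not the library's actual parser.

    import re

    # Illustrative only: extract the <S0>/<S1>/<S2> sections requested by fluency.prompty.
    def parse_tagged_output(llm_output: str) -> dict:
        def grab(tag: str) -> str:
            match = re.search(rf"<{tag}>(.*?)</{tag}>", llm_output, re.DOTALL)
            return match.group(1).strip() if match else ""

        return {
            "thought_chain": grab("S0"),
            "explanation": grab("S1"),
            "score": int(grab("S2") or 0),  # the prompty asks for an integer score 1-5
        }

    sample = "<S0>Let's think step by step: ...</S0><S1>Clear, grammatical response.</S1><S2>4</S2>"
    print(parse_tagged_output(sample))  # {'thought_chain': "Let's think step by step: ...", ..., 'score': 4}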
azure/ai/evaluation/_evaluators/_gleu/__init__.py
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._gleu import GleuScoreEvaluator
+
+ __all__ = [
+     "GleuScoreEvaluator",
+ ]
azure/ai/evaluation/_evaluators/_gleu/_gleu.py
@@ -0,0 +1,69 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from nltk.translate.gleu_score import sentence_gleu
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+
+ from azure.ai.evaluation._common.utils import nltk_tokenize
+
+
+ class _AsyncGleuScoreEvaluator:
+     def __init__(self):
+         pass
+
+     async def __call__(self, *, ground_truth: str, response: str, **kwargs):
+         reference_tokens = nltk_tokenize(ground_truth)
+         hypothesis_tokens = nltk_tokenize(response)
+
+         score = sentence_gleu([reference_tokens], hypothesis_tokens)
+
+         return {
+             "gleu_score": score,
+         }
+
+
+ class GleuScoreEvaluator:
+     """
+     Calculates the GLEU (Google-BLEU) score between a response and the ground truth.
+
+     The GLEU (Google-BLEU) score evaluator measures the similarity between generated and reference texts by
+     evaluating n-gram overlap, considering both precision and recall. This balanced evaluation, designed for
+     sentence-level assessment, makes it ideal for detailed analysis of translation quality. GLEU is well-suited for
+     use cases such as machine translation, text summarization, and text generation.
+
+     GLEU scores range from 0 to 1, where a value of 1 represents perfect overlap between the response and
+     the ground truth and a value of 0 indicates no overlap.
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START gleu_score_evaluator]
+             :end-before: [END gleu_score_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a GleuScoreEvaluator.
+     """
+
+     id = "azureml://registries/azureml/models/Gleu-Score-Evaluator/versions/3"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     def __init__(self):
+         self._async_evaluator = _AsyncGleuScoreEvaluator()
+
+     def __call__(self, *, ground_truth: str, response: str, **kwargs):
+         """
+         Evaluate the GLEU score between the response and the ground truth.
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword ground_truth: The ground truth to be compared against.
+         :paramtype ground_truth: str
+         :return: The GLEU score.
+         :rtype: Dict[str, float]
+         """
+         return async_run_allowing_running_loop(
+             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
+         )
+
+     def _to_async(self):
+         return self._async_evaluator
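
GleuScoreEvaluator is a thin wrapper over NLTK's sentence-level GLEU. The sketch below performs the same computation directly; `nltk.word_tokenize` stands in for the package's `nltk_tokenize` helper, which may differ in detail.

    # Direct GLEU computation mirroring _AsyncGleuScoreEvaluator.__call__.
    import nltk
    from nltk.translate.gleu_score import sentence_gleu

    nltk.download("punkt", quiet=True)  # tokenizer data, if not already present

    ground_truth = "The capital of France is Paris."
    response = "Paris is the capital of France."

    reference_tokens = nltk.word_tokenize(ground_truth)
    hypothesis_tokens = nltk.word_tokenize(response)

    # sentence_gleu takes a list of reference token lists and one hypothesis; scores fall in [0, 1].
    score = sentence_gleu([reference_tokens], hypothesis_tokens)
    print({"gleu_score": score})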
azure/ai/evaluation/_evaluators/_groundedness/__init__.py
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._groundedness import GroundednessEvaluator
+
+ __all__ = [
+     "GroundednessEvaluator",
+ ]