azure-ai-evaluation 1.9.0__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (64)
  1. azure/ai/evaluation/__init__.py +46 -12
  2. azure/ai/evaluation/_aoai/python_grader.py +84 -0
  3. azure/ai/evaluation/_aoai/score_model_grader.py +1 -0
  4. azure/ai/evaluation/_common/rai_service.py +3 -3
  5. azure/ai/evaluation/_common/utils.py +74 -17
  6. azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +70 -22
  7. azure/ai/evaluation/_evaluate/_evaluate.py +150 -40
  8. azure/ai/evaluation/_evaluate/_evaluate_aoai.py +2 -0
  9. azure/ai/evaluation/_evaluate/_utils.py +1 -2
  10. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +1 -1
  11. azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +8 -1
  12. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +1 -1
  13. azure/ai/evaluation/_evaluators/_common/_base_eval.py +30 -6
  14. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +18 -8
  15. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +15 -5
  16. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +4 -1
  17. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +4 -1
  18. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +5 -2
  19. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +4 -1
  20. azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +3 -0
  21. azure/ai/evaluation/_evaluators/_eci/_eci.py +3 -0
  22. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +1 -1
  23. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +1 -1
  24. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +1 -1
  25. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +1 -1
  26. azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +1 -1
  27. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +1 -1
  28. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +8 -1
  29. azure/ai/evaluation/_evaluators/_qa/_qa.py +1 -1
  30. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +54 -2
  31. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +140 -59
  32. azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +1 -1
  33. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +1 -1
  34. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +1 -1
  35. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +2 -1
  36. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +1 -1
  37. azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +16 -10
  38. azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +354 -66
  39. azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +169 -186
  40. azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +101 -23
  41. azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +8 -1
  42. azure/ai/evaluation/_evaluators/_xpia/xpia.py +4 -1
  43. azure/ai/evaluation/_legacy/_batch_engine/_config.py +6 -3
  44. azure/ai/evaluation/_legacy/_batch_engine/_engine.py +115 -30
  45. azure/ai/evaluation/_legacy/_batch_engine/_result.py +2 -0
  46. azure/ai/evaluation/_legacy/_batch_engine/_run.py +2 -2
  47. azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +28 -31
  48. azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +2 -0
  49. azure/ai/evaluation/_version.py +1 -1
  50. azure/ai/evaluation/red_team/__init__.py +2 -2
  51. azure/ai/evaluation/red_team/_red_team.py +838 -478
  52. azure/ai/evaluation/red_team/_red_team_result.py +6 -0
  53. azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +8 -3
  54. azure/ai/evaluation/red_team/_utils/constants.py +0 -2
  55. azure/ai/evaluation/simulator/_adversarial_simulator.py +5 -2
  56. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +13 -1
  57. azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +2 -2
  58. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +20 -2
  59. azure/ai/evaluation/simulator/_simulator.py +12 -0
  60. {azure_ai_evaluation-1.9.0.dist-info → azure_ai_evaluation-1.10.0.dist-info}/METADATA +32 -3
  61. {azure_ai_evaluation-1.9.0.dist-info → azure_ai_evaluation-1.10.0.dist-info}/RECORD +64 -63
  62. {azure_ai_evaluation-1.9.0.dist-info → azure_ai_evaluation-1.10.0.dist-info}/NOTICE.txt +0 -0
  63. {azure_ai_evaluation-1.9.0.dist-info → azure_ai_evaluation-1.10.0.dist-info}/WHEEL +0 -0
  64. {azure_ai_evaluation-1.9.0.dist-info → azure_ai_evaluation-1.10.0.dist-info}/top_level.txt +0 -0
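
Most of the per-evaluator hunks below follow one pattern: the `id` class attribute moves from an `azureml://registries/azureml/models/...` asset path (or an ad-hoc string such as `"qa"` or `None`) to the new `azureai://built-in/evaluators/<name>` scheme, which the docstrings describe as experimental and intended only for evaluation in the cloud. A minimal sketch of reading the new identifiers (assumes azure-ai-evaluation 1.10.0 is installed; the expected values are copied from the hunks below, not verified by running the package here):

    # Sketch only: inspect the 1.10.0 built-in evaluator identifiers.
    from azure.ai.evaluation import (
        F1ScoreEvaluator,
        FluencyEvaluator,
        GroundednessEvaluator,
        RelevanceEvaluator,
    )

    for evaluator_cls in (F1ScoreEvaluator, FluencyEvaluator, GroundednessEvaluator, RelevanceEvaluator):
        # `id` is a class attribute, so no model configuration is needed to read it.
        print(f"{evaluator_cls.__name__}: {evaluator_cls.id}")

    # Expected per this diff:
    # F1ScoreEvaluator: azureai://built-in/evaluators/f1_score
    # FluencyEvaluator: azureai://built-in/evaluators/fluency
    # GroundednessEvaluator: azureai://built-in/evaluators/groundedness
    # RelevanceEvaluator: azureai://built-in/evaluators/relevance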
@@ -49,6 +49,9 @@ class DocumentRetrievalEvaluator(EvaluatorBase):
          :caption: Initialize with threshold and call a DocumentRetrievalEvaluator.
      """
 
+     id = "azureai://built-in/evaluators/document_retrieval"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
      def __init__(
          self,
          *,
@@ -52,17 +52,20 @@ class ECIEvaluator(RaiServiceEvaluatorBase):
 
      id = "eci"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+     _OPTIONAL_PARAMS = ["query"]
 
      @override
      def __init__(
          self,
          credential,
          azure_ai_project,
+         **kwargs,
      ):
          super().__init__(
              eval_metric=_InternalEvaluationMetrics.ECI,
              azure_ai_project=azure_ai_project,
              credential=credential,
+             **kwargs,
          )
 
      @overload
@@ -58,7 +58,7 @@ class F1ScoreEvaluator(EvaluatorBase):
          :caption: Initialize with threshold and call an F1ScoreEvaluator.
      """
 
-     id = "azureml://registries/azureml/models/F1Score-Evaluator/versions/3"
+     id = "azureai://built-in/evaluators/f1_score"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      def __init__(self, *, threshold=0.5):
@@ -64,7 +64,7 @@ class FluencyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
      _PROMPTY_FILE = "fluency.prompty"
      _RESULT_KEY = "fluency"
 
-     id = "azureml://registries/azureml/models/Fluency-Evaluator/versions/4"
+     id = "azureai://built-in/evaluators/fluency"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -55,7 +55,7 @@ class GleuScoreEvaluator(EvaluatorBase):
          https://{resource_name}.services.ai.azure.com/api/projects/{project_name}
      """
 
-     id = "azureml://registries/azureml/models/Gleu-Score-Evaluator/versions/3"
+     id = "azureai://built-in/evaluators/gleu_score"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -79,7 +79,7 @@ class GroundednessEvaluator(PromptyEvaluatorBase[Union[str, float]]):
      _RESULT_KEY = "groundedness"
      _OPTIONAL_PARAMS = ["query"]
 
-     id = "azureml://registries/azureml/models/Groundedness-Evaluator/versions/4"
+     id = "azureai://built-in/evaluators/groundedness"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -57,7 +57,7 @@ class IntentResolutionEvaluator(PromptyEvaluatorBase[Union[str, float]]):
      _MAX_INTENT_RESOLUTION_SCORE = 5
      _DEFAULT_INTENT_RESOLUTION_THRESHOLD = 3
 
-     id = None
+     id = "azureai://built-in/evaluators/intent_resolution"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -65,7 +65,7 @@ class MeteorScoreEvaluator(EvaluatorBase):
          :caption: Initialize with threshold and call a MeteorScoreEvaluator.
      """
 
-     id = "azureml://registries/azureml/models/Meteor-Score-Evaluator/versions/3"
+     id = "azureai://built-in/evaluators/meteor_score"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -50,19 +50,26 @@ class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
 
      """
 
-     id = "azureml://registries/azureml/models/Protected-Material-Evaluator/versions/3"
+     id = "azureai://built-in/evaluators/protected_material"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+     _OPTIONAL_PARAMS = ["query"]
 
      @override
      def __init__(
          self,
          credential,
          azure_ai_project,
+         **kwargs,
      ):
+         # Set default for evaluate_query if not provided
+         if "evaluate_query" not in kwargs:
+             kwargs["evaluate_query"] = True
+
          super().__init__(
              eval_metric=EvaluationMetrics.PROTECTED_MATERIAL,
              azure_ai_project=azure_ai_project,
              credential=credential,
+             **kwargs,
          )
 
      @overload
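
The constructor now forwards `**kwargs` and defaults `evaluate_query` to `True` when the caller does not set it. A hedged usage sketch (the project endpoint is a placeholder, and the effect of `evaluate_query` beyond what the hunk shows is an assumption based on the parameter name):

    # Sketch only: constructing the evaluator with the new keyword pass-through.
    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import ProtectedMaterialEvaluator

    # Placeholder project endpoint, as in the docstrings elsewhere in this diff.
    azure_ai_project = "https://<resource_name>.services.ai.azure.com/api/projects/<project_name>"

    # 1.10.0 defaults evaluate_query=True; pass False explicitly to keep scoring
    # the response only (assumed semantics of the flag).
    evaluator = ProtectedMaterialEvaluator(
        credential=DefaultAzureCredential(),
        azure_ai_project=azure_ai_project,
        evaluate_query=False,
    )
    result = evaluator(query="What is the capital of France?", response="Paris.")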
@@ -74,7 +74,7 @@ class QAEvaluator(MultiEvaluatorBase[Union[str, float]]):
          however, it is recommended to use the new keys moving forward as the old keys will be deprecated in the future.
      """
 
-     id = "qa"
+     id = "azureai://built-in/evaluators/qa"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      def __init__(
@@ -1,15 +1,21 @@
  # ---------------------------------------------------------
  # Copyright (c) Microsoft Corporation. All rights reserved.
  # ---------------------------------------------------------
-
+ import logging
+ import math
  import os
  from typing import Dict, Union, List
 
  from typing_extensions import overload, override
 
+ from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
+ from ..._common.utils import reformat_conversation_history, reformat_agent_response
+
  from azure.ai.evaluation._model_configurations import Conversation
  from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
 
+ logger = logging.getLogger(__name__)
+
 
  class RelevanceEvaluator(PromptyEvaluatorBase):
      """
@@ -69,7 +75,7 @@ class RelevanceEvaluator(PromptyEvaluatorBase):
      _PROMPTY_FILE = "relevance.prompty"
      _RESULT_KEY = "relevance"
 
-     id = "azureml://registries/azureml/models/Relevance-Evaluator/versions/4"
+     id = "azureai://built-in/evaluators/relevance"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -141,3 +147,49 @@ class RelevanceEvaluator(PromptyEvaluatorBase):
          :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
          """
          return super().__call__(*args, **kwargs)
+
+     async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str]]:  # type: ignore[override]
+         """Do a relevance evaluation.
+
+         :param eval_input: The input to the evaluator. Expected to contain
+         whatever inputs are needed for the _flow method, including context
+         and other fields depending on the child class.
+         :type eval_input: Dict
+         :return: The evaluation result.
+         :rtype: Dict
+         """
+         if "query" not in eval_input and "response" not in eval_input:
+             raise EvaluationException(
+                 message="Only text conversation inputs are supported.",
+                 internal_message="Only text conversation inputs are supported.",
+                 blame=ErrorBlame.USER_ERROR,
+                 category=ErrorCategory.INVALID_VALUE,
+                 target=ErrorTarget.CONVERSATION,
+             )
+         if not isinstance(eval_input["query"], str):
+             eval_input["query"] = reformat_conversation_history(eval_input["query"], logger)
+         if not isinstance(eval_input["response"], str):
+             eval_input["response"] = reformat_agent_response(eval_input["response"], logger)
+         llm_output = await self._flow(timeout=self._LLM_CALL_TIMEOUT, **eval_input)
+         score = math.nan
+
+         if isinstance(llm_output, dict):
+             score = float(llm_output.get("score", math.nan))
+             reason = llm_output.get("explanation", "")
+             # Parse out score and reason from evaluators known to possess them.
+             binary_result = self._get_binary_result(score)
+             return {
+                 self._result_key: float(score),
+                 f"gpt_{self._result_key}": float(score),
+                 f"{self._result_key}_reason": reason,
+                 f"{self._result_key}_result": binary_result,
+                 f"{self._result_key}_threshold": self._threshold,
+             }
+
+         binary_result = self._get_binary_result(score)
+         return {
+             self._result_key: float(score),
+             f"gpt_{self._result_key}": float(score),
+             f"{self._result_key}_result": binary_result,
+             f"{self._result_key}_threshold": self._threshold,
+         }
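
With the new `_do_eval`, a relevance call returns the score under both `relevance` and `gpt_relevance`, plus `_reason`, `_result`, and `_threshold` keys when the model reply parses as a dict. A hedged sketch of the caller-visible shape (the Azure OpenAI settings are placeholders):

    # Sketch only: calling RelevanceEvaluator and reading the keys emitted by _do_eval.
    from azure.ai.evaluation import AzureOpenAIModelConfiguration, RelevanceEvaluator

    model_config = AzureOpenAIModelConfiguration(
        azure_endpoint="https://<aoai-resource>.openai.azure.com",
        azure_deployment="<deployment>",
        api_key="<key>",
    )

    relevance = RelevanceEvaluator(model_config=model_config)
    result = relevance(
        query="What topics will the conference cover?",
        response="The conference will cover renewable energy and climate change.",
    )

    # Keys produced when the LLM returns valid JSON, per the hunk above:
    # result["relevance"], result["gpt_relevance"]               -> float score 1-5
    # result["relevance_reason"]                                  -> explanation string
    # result["relevance_result"], result["relevance_threshold"]   -> pass/fail and threshold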
@@ -10,91 +10,172 @@ model:
      presence_penalty: 0
      frequency_penalty: 0
      response_format:
-       type: text
+       type: json_object
 
  inputs:
    query:
      type: string
    response:
      type: string
-
  ---
+
  system:
- # Instruction
- ## Goal
- ### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
- - **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
- - **Data**: Your input data include QUERY and RESPONSE.
- - **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+ You are a Relevance-Judge, an impartial evaluator that scores how well the RESPONSE addresses the QUERY using the definitions provided.
 
  user:
- # Definition
- **Relevance** refers to how effectively a response addresses a question. It assesses the accuracy, completeness, and direct relevance of the response based solely on the given information.
-
- # Ratings
- ## [Relevance: 1] (Irrelevant Response)
- **Definition:** The response is unrelated to the question. It provides information that is off-topic and does not attempt to address the question posed.
-
- **Examples:**
- **Query:** What is the team preparing for?
- **Response:** I went grocery shopping yesterday evening.
-
- **Query:** When will the company's new product line launch?
- **Response:** International travel can be very rewarding and educational.
+ ROLE
+ ====
+ You are a Relevance Evaluator. Your task is to judge how relevant a RESPONSE is to a QUERY using the Relevance definitions provided.
 
- ## [Relevance: 2] (Incorrect Response)
- **Definition:** The response attempts to address the question but includes incorrect information. It provides a response that is factually wrong based on the provided information.
+ INPUT
+ =====
+ QUERY: {{query}}
+ RESPONSE: {{response}}
 
- **Examples:**
- **Query:** When was the merger between the two firms finalized?
- **Response:** The merger was finalized on April 10th.
+ TASK
+ ====
+ Output a JSON object with:
+ 1) a concise explanation of 15-60 words justifying your score based on how well the response is relevant to the query.
+ 2) an integer score from 1 (very poor) to 5 (excellent) using the rubric below.
 
- **Query:** Where and when will the solar eclipse be visible?
- **Response:** The solar eclipse will be visible in Asia on December 14th.
+ The explanation should always precede the score and should clearly justify the score based on the rubric definitions.
+ Response format exactly as follows:
 
- ## [Relevance: 3] (Incomplete Response)
- **Definition:** The response addresses the question but omits key details necessary for a full understanding. It provides a partial response that lacks essential information.
+ {
+   "explanation": "<15-60 words>",
+   "score": <1-5>
+ }
 
- **Examples:**
- **Query:** What type of food does the new restaurant offer?
- **Response:** The restaurant offers Italian food like pasta.
 
- **Query:** What topics will the conference cover?
- **Response:** The conference will cover renewable energy and climate change.
+ EVALUATION STEPS
+ ================
+ A. Read the QUERY and RESPONSE carefully.
+ B. Compare the RESPONSE against the rubric below:
+    - Does the response directly address the query?
+    - Is the information complete, partial, or off-topic?
+    - Is it vague, generic, or insightful?
+ C. Match the response to the best score from the rubric.
+ D. Provide a short explanation and the score using the required format.
 
- ## [Relevance: 4] (Complete Response)
- **Definition:** The response fully addresses the question with accurate and complete information. It includes all essential details required for a comprehensive understanding, without adding any extraneous information.
+ SCORING RUBRIC
+ ==============
 
- **Examples:**
- **Query:** What type of food does the new restaurant offer?
- **Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto.
+ ### Score 1 - Irrelevant Response
+ Definition: The response is unrelated to the question. It provides off-topic information and does not attempt to address the question posed.
 
- **Query:** What topics will the conference cover?
- **Response:** The conference will cover renewable energy, climate change, and sustainability practices.
+ **Example A**
+ QUERY: What is the team preparing for?
+ RESPONSE: I went grocery shopping yesterday evening.
 
- ## [Relevance: 5] (Comprehensive Response with Insights)
- **Definition:** The response not only fully and accurately addresses the question but also includes additional relevant insights or elaboration. It may explain the significance, implications, or provide minor inferences that enhance understanding.
+ Expected Output:
+ {
+   "explanation": "The response is entirely off-topic and doesn't address the question.",
+   "score": 1
+ }
 
- **Examples:**
- **Query:** What type of food does the new restaurant offer?
- **Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto, aiming to provide customers with an authentic Italian dining experience.
 
- **Query:** What topics will the conference cover?
- **Response:** The conference will cover renewable energy, climate change, and sustainability practices, bringing together global experts to discuss these critical issues.
+ **Example B**
+ QUERY: When will the company's new product line launch?
+ RESPONSE: International travel can be very rewarding and educational.
 
+ Expected Output:
+ {
+   "explanation": "The response is completely irrelevant to the product launch question.",
+   "score": 1
+ }
+
+
+ ### Score 2 – Related but Unhelpful / Superficial
+ Definition: The response is loosely or formally related to the query but fails to deliver any meaningful, specific, or useful information. This includes vague phrases, non-answers, or failure/error messages.
+
+ **Example A**
+ QUERY: What is the event about?
+ RESPONSE: It’s something important.
+
+ Expected Output:
+ {
+   "explanation": "The response vaguely refers to the query topic but lacks specific or informative content.",
+   "score": 2
+ }
+
+ **Example B**
+ QUERY: What’s the weather in Paris?
+ RESPONSE: I tried to find the forecast but the query failed.
+
+ Expected Output:
+ {
+   "explanation": "The response acknowledges the query but provides no usable weather information. It is related but unhelpful.",
+   "score": 2
+ }
+
+ ### Score 3 - Partially Relevant / Incomplete
+ Definition: The response addresses the query and includes relevant information, but omits essential components or detail. The answer is on-topic but insufficient to fully satisfy the request.
+
+ **Example A**
+ QUERY: What amenities does the new apartment complex provide?
+ RESPONSE: The apartment complex has a gym.
+
+ Expected Output:
+ {
+   "explanation": "The response mentions one amenity but does not provide a fuller list or clarify whether other standard features (like parking or security) are included. It partially addresses the query but lacks completeness.",
+   "score": 3
+ }
+
+ **Example B**
+ QUERY: What services does the premium membership include?
+ RESPONSE: It includes priority customer support.
+
+ Expected Output:
+ {
+   "explanation": "The response identifies one service but omits other likely components of a premium membership (e.g., exclusive content or discounts). The information is relevant but incomplete.",
+   "score": 3
+ }
+
+
+
+ ### Score 4 - Fully Relevant / Sufficient Response
+ Definition: The response fully addresses the question with accurate and sufficient information, covering all essential aspects. Very minor omissions are acceptable as long as the core information is intact and the intent is clearly conveyed.
+
+ **Example A**
+ QUERY: What amenities does the new apartment complex provide?
+ RESPONSE: The apartment complex provides a gym, swimming pool, and 24/7 security.
+
+ Expected Output:
+ {
+   "explanation": "The response mentions multiple key amenities likely to be relevant to most users. While it may not list every feature, it clearly conveys the core offerings of the complex.",
+   "score": 4
+ }
 
+ **Example B**
+ QUERY: What services does the premium membership include?
+ RESPONSE: The premium membership includes priority customer support, exclusive content access, and early product releases.
+
+ Expected Output:
+ {
+   "explanation": "The response outlines all major services expected from a premium membership. Even if a minor service is not mentioned, the core value is clearly and fully represented.",
+   "score": 4
+ }
 
- # Data
- QUERY: {{query}}
- RESPONSE: {{response}}
 
+ ### Score 5 - Comprehensive Response with Insights
+ Definition: The response not only fully and accurately answers the question, but also adds meaningful elaboration, interpretation, or context that enhances the user's understanding. This goes beyond just listing relevant details — it offers insight into why the information matters, how it's useful, or what impact it has.
 
- # Tasks
- ## Please provide your assessment Score for the previous RESPONSE in relation to the QUERY based on the Definitions above. Your output should include the following information:
- - **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
- - **Explanation**: a very short explanation of why you think the input Data should get that Score.
- - **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+ **Example A**
+ QUERY: What amenities does the new apartment complex provide?
+ RESPONSE: The apartment complex provides a gym, swimming pool, and 24/7 security, designed to offer residents a comfortable and active lifestyle while ensuring their safety.
+
+ Expected Output:
+ {
+   "explanation": "The response fully lists key amenities and additionally explains how these features contribute to resident experience, enhancing the usefulness of the information.",
+   "score": 5
+ }
 
+ **Example B**
+ QUERY: What services does the premium membership include?
+ RESPONSE: The premium membership includes priority customer support, exclusive content access, and early product releases — tailored for users who want quicker resolutions and first access to new features.
 
- ## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
- # Output
+ Expected Output:
+ {
+   "explanation": "The response covers all essential services and adds valuable insight about the target user and benefits, enriching the response beyond basic listing.",
+   "score": 5
+ }
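
Because the prompty now requests `response_format: json_object`, the model reply is expected to be a JSON object with `explanation` and `score`, which is exactly what the new `_do_eval` above reads via `llm_output.get(...)`. A small illustrative sketch of that contract (the raw reply string is invented for illustration):

    import json
    import math

    # Hypothetical raw model reply matching the prompty's required format.
    raw_reply = '{"explanation": "The response directly lists the conference topics.", "score": 4}'

    llm_output = json.loads(raw_reply)  # the prompty flow hands _do_eval a dict like this
    score = float(llm_output.get("score", math.nan))
    reason = llm_output.get("explanation", "")
    print(score, reason)  # 4.0 The response directly lists the conference topics.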
@@ -64,7 +64,7 @@ class ResponseCompletenessEvaluator(PromptyEvaluatorBase[Union[str, float]]):
      _PROMPTY_FILE = "response_completeness.prompty"
      _RESULT_KEY = "response_completeness"
 
-     id = "completeness"
+     id = "azureai://built-in/evaluators/response_completeness"
 
      _MIN_COMPLETENESS_SCORE = 1
      _MAX_COMPLETENESS_SCORE = 5
@@ -74,7 +74,7 @@ class RetrievalEvaluator(PromptyEvaluatorBase[Union[str, float]]):
      _PROMPTY_FILE = "retrieval.prompty"
      _RESULT_KEY = "retrieval"
 
-     id = "azureml://registries/azureml/models/Retrieval-Evaluator/versions/1"
+     id = "azureai://built-in/evaluators/retrieval"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -90,7 +90,7 @@ class RougeScoreEvaluator(EvaluatorBase):
          :caption: Initialize with a specified threshold and call a RougeScoreEvaluator with a four-gram rouge type.
      """
 
-     id = "azureml://registries/azureml/models/Rouge-Score-Evaluator/versions/3"
+     id = "azureai://built-in/evaluators/rouge_score"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -66,8 +66,9 @@ class GroundednessProEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
          for the groundedness pro label will be "groundedness_pro_passing_rate".
      """
 
-     id = "azureml://registries/azureml/models/Groundedness-Pro-Evaluator/versions/1"
+     id = "azureai://built-in/evaluators/groundedness_pro"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+     _OPTIONAL_PARAMS = ["query"]
 
      @override
      def __init__(
@@ -71,7 +71,7 @@ class SimilarityEvaluator(PromptyEvaluatorBase):
      _PROMPTY_FILE = "similarity.prompty"
      _RESULT_KEY = "similarity"
 
-     id = "similarity"
+     id = "azureai://built-in/evaluators/similarity"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -3,16 +3,19 @@
  # ---------------------------------------------------------
  import os
  import math
+ import logging
  from typing import Dict, Union, List, Optional
 
  from typing_extensions import overload, override
 
  from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
  from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
- from azure.ai.evaluation._common.utils import parse_quality_evaluator_reason_score
+ from ..._common.utils import reformat_conversation_history, reformat_agent_response, reformat_tool_definitions
  from azure.ai.evaluation._model_configurations import Message
  from azure.ai.evaluation._common._experimental import experimental
 
+ logger = logging.getLogger(__name__)
+
 
  @experimental
  class TaskAdherenceEvaluator(PromptyEvaluatorBase[Union[str, float]]):
@@ -62,7 +65,7 @@ class TaskAdherenceEvaluator(PromptyEvaluatorBase[Union[str, float]]):
 
      _DEFAULT_TASK_ADHERENCE_SCORE = 3
 
-     id = None
+     id = "azureai://built-in/evaluators/task_adherence"
      """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
 
      @override
@@ -140,20 +143,23 @@ class TaskAdherenceEvaluator(PromptyEvaluatorBase[Union[str, float]]):
                  category=ErrorCategory.MISSING_FIELD,
                  target=ErrorTarget.TASK_ADHERENCE_EVALUATOR,
              )
-
+         eval_input["query"] = reformat_conversation_history(eval_input["query"], logger, include_system_messages=True)
+         eval_input["response"] = reformat_agent_response(eval_input["response"], logger, include_tool_messages=True)
+         if "tool_definitions" in eval_input and eval_input["tool_definitions"] is not None:
+             eval_input["tool_definitions"] = reformat_tool_definitions(eval_input["tool_definitions"], logger)
          llm_output = await self._flow(timeout=self._LLM_CALL_TIMEOUT, **eval_input)
-
-         score = math.nan
-         if llm_output:
-             score, reason = parse_quality_evaluator_reason_score(llm_output, valid_score_range="[1-5]")
-
+         if isinstance(llm_output, dict):
+             score = float(llm_output.get("score", math.nan))
              score_result = "pass" if score >= self.threshold else "fail"
-
+             reason = llm_output.get("explanation", "")
              return {
                  f"{self._result_key}": score,
                  f"{self._result_key}_result": score_result,
                  f"{self._result_key}_threshold": self.threshold,
                  f"{self._result_key}_reason": reason,
+                 # Uncomment the following line in the next iteration after UI contracts are validated.
+                 # f"{self._result_key}_additional_details": llm_output
              }
-
+         if logger:
+             logger.warning("LLM output is not a dictionary, returning NaN for the score.")
          return {self._result_key: math.nan}
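
Task adherence now follows the same pattern as relevance: inputs are reformatted via the `_common.utils` helpers, the prompty is expected to return a JSON object, and score/explanation are read directly from the dict instead of being regex-parsed by `parse_quality_evaluator_reason_score`. A hedged usage sketch with placeholder model settings (string inputs are shown for brevity; agent-style message lists and `tool_definitions` are also accepted per the hunk above):

    # Sketch only: TaskAdherenceEvaluator call shape in 1.10.0; placeholders throughout.
    from azure.ai.evaluation import AzureOpenAIModelConfiguration, TaskAdherenceEvaluator

    model_config = AzureOpenAIModelConfiguration(
        azure_endpoint="https://<aoai-resource>.openai.azure.com",
        azure_deployment="<deployment>",
        api_key="<key>",
    )

    task_adherence = TaskAdherenceEvaluator(model_config=model_config)
    result = task_adherence(
        query="Book a table for two at an Italian restaurant tonight.",
        response="I booked a table for two at 7pm at Trattoria Roma.",
    )

    # Emitted keys per the hunk above (result key name assumed to be task_adherence):
    # task_adherence, task_adherence_result ("pass"/"fail"),
    # task_adherence_threshold, task_adherence_reason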