azure-ai-evaluation 1.0.0b4__py3-none-any.whl → 1.0.0b5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (79)
  1. azure/ai/evaluation/__init__.py +22 -0
  2. azure/ai/evaluation/_common/constants.py +5 -0
  3. azure/ai/evaluation/_common/math.py +11 -0
  4. azure/ai/evaluation/_common/rai_service.py +172 -35
  5. azure/ai/evaluation/_common/utils.py +162 -23
  6. azure/ai/evaluation/_constants.py +6 -6
  7. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
  8. azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +4 -4
  9. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +6 -3
  10. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +35 -0
  11. azure/ai/evaluation/_evaluate/_eval_run.py +21 -4
  12. azure/ai/evaluation/_evaluate/_evaluate.py +267 -139
  13. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +5 -5
  14. azure/ai/evaluation/_evaluate/_utils.py +40 -7
  15. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +1 -1
  16. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +14 -9
  17. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
  18. azure/ai/evaluation/_evaluators/_common/_base_eval.py +20 -19
  19. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +18 -8
  20. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +48 -9
  21. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +56 -19
  22. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +5 -5
  23. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +30 -1
  24. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +30 -1
  25. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +30 -1
  26. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +30 -1
  27. azure/ai/evaluation/_evaluators/_eci/_eci.py +3 -1
  28. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +20 -20
  29. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
  30. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +1 -1
  31. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +49 -15
  32. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
  33. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
  34. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +3 -7
  35. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
  36. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +130 -0
  37. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +57 -0
  38. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +96 -0
  39. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +120 -0
  40. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +96 -0
  41. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +96 -0
  42. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +96 -0
  43. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +44 -11
  44. azure/ai/evaluation/_evaluators/_qa/_qa.py +7 -3
  45. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +21 -19
  46. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
  47. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +125 -82
  48. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
  49. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +2 -2
  50. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  51. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +150 -0
  52. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +17 -14
  53. azure/ai/evaluation/_evaluators/_xpia/xpia.py +32 -5
  54. azure/ai/evaluation/_exceptions.py +17 -0
  55. azure/ai/evaluation/_model_configurations.py +18 -1
  56. azure/ai/evaluation/_version.py +1 -1
  57. azure/ai/evaluation/simulator/__init__.py +2 -1
  58. azure/ai/evaluation/simulator/_adversarial_scenario.py +5 -0
  59. azure/ai/evaluation/simulator/_adversarial_simulator.py +4 -1
  60. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  61. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  62. azure/ai/evaluation/simulator/_direct_attack_simulator.py +1 -1
  63. azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
  64. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +22 -1
  65. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +79 -34
  66. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +1 -1
  67. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -4
  68. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -1
  69. azure/ai/evaluation/simulator/_simulator.py +115 -61
  70. azure/ai/evaluation/simulator/_utils.py +6 -6
  71. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/METADATA +166 -9
  72. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/NOTICE.txt +20 -0
  73. azure_ai_evaluation-1.0.0b5.dist-info/RECORD +120 -0
  74. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/WHEEL +1 -1
  75. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
  76. azure_ai_evaluation-1.0.0b4.dist-info/RECORD +0 -106
  77. /azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +0 -0
  78. /azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +0 -0
  79. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/top_level.txt +0 -0

azure/ai/evaluation/_evaluators/_relevance/relevance.prompty

@@ -5,7 +5,7 @@ model:
   api: chat
   parameters:
     temperature: 0.0
-    max_tokens: 1
+    max_tokens: 800
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -17,48 +17,84 @@ inputs:
     type: string
   response:
     type: string
-  context:
-    type: string
 
 ---
 system:
-You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. You should return a single integer value between 1 to 5 representing the evaluation metric. You will include no other text or information.
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include QUERY and RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
 user:
-Relevance measures how well the answer addresses the main aspects of the question, based on the context. Consider whether all and only the important aspects are contained in the answer when evaluating relevance. Given the context and question, score the relevance of the answer between one to five stars using the following rating scale:
-One star: the answer completely lacks relevance
-Two stars: the answer mostly lacks relevance
-Three stars: the answer is partially relevant
-Four stars: the answer is mostly relevant
-Five stars: the answer has perfect relevance
-
-This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
-
-context: Marie Curie was a Polish-born physicist and chemist who pioneered research on radioactivity and was the first woman to win a Nobel Prize.
-question: What field did Marie Curie excel in?
-answer: Marie Curie was a renowned painter who focused mainly on impressionist styles and techniques.
-stars: 1
-
-context: The Beatles were an English rock band formed in Liverpool in 1960, and they are widely regarded as the most influential music band in history.
-question: Where were The Beatles formed?
-answer: The band The Beatles began their journey in London, England, and they changed the history of music.
-stars: 2
-
-context: The recent Mars rover, Perseverance, was launched in 2020 with the main goal of searching for signs of ancient life on Mars. The rover also carries an experiment called MOXIE, which aims to generate oxygen from the Martian atmosphere.
-question: What are the main goals of Perseverance Mars rover mission?
-answer: The Perseverance Mars rover mission focuses on searching for signs of ancient life on Mars.
-stars: 3
-
-context: The Mediterranean diet is a commonly recommended dietary plan that emphasizes fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. Studies have shown that it offers numerous health benefits, including a reduced risk of heart disease and improved cognitive health.
-question: What are the main components of the Mediterranean diet?
-answer: The Mediterranean diet primarily consists of fruits, vegetables, whole grains, and legumes.
-stars: 4
-
-context: The Queen's Royal Castle is a well-known tourist attraction in the United Kingdom. It spans over 500 acres and contains extensive gardens and parks. The castle was built in the 15th century and has been home to generations of royalty.
-question: What are the main attractions of the Queen's Royal Castle?
-answer: The main attractions of the Queen's Royal Castle are its expansive 500-acre grounds, extensive gardens, parks, and the historical castle itself, which dates back to the 15th century and has housed generations of royalty.
-stars: 5
-
-context: {{context}}
-question: {{query}}
-answer: {{response}}
-stars:
+# Definition
+**Relevance** refers to how effectively a response addresses a question. It assesses the accuracy, completeness, and direct relevance of the response based solely on the given information.
+
+# Ratings
+## [Relevance: 1] (Irrelevant Response)
+**Definition:** The response is unrelated to the question. It provides information that is off-topic and does not attempt to address the question posed.
+
+**Examples:**
+**Query:** What is the team preparing for?
+**Response:** I went grocery shopping yesterday evening.
+
+**Query:** When will the company's new product line launch?
+**Response:** International travel can be very rewarding and educational.
+
+## [Relevance: 2] (Incorrect Response)
+**Definition:** The response attempts to address the question but includes incorrect information. It provides a response that is factually wrong based on the provided information.
+
+**Examples:**
+**Query:** When was the merger between the two firms finalized?
+**Response:** The merger was finalized on April 10th.
+
+**Query:** Where and when will the solar eclipse be visible?
+**Response:** The solar eclipse will be visible in Asia on December 14th.
+
+## [Relevance: 3] (Incomplete Response)
+**Definition:** The response addresses the question but omits key details necessary for a full understanding. It provides a partial response that lacks essential information.
+
+**Examples:**
+**Query:** What type of food does the new restaurant offer?
+**Response:** The restaurant offers Italian food like pasta.
+
+**Query:** What topics will the conference cover?
+**Response:** The conference will cover renewable energy and climate change.
+
+## [Relevance: 4] (Complete Response)
+**Definition:** The response fully addresses the question with accurate and complete information. It includes all essential details required for a comprehensive understanding, without adding any extraneous information.
+
+**Examples:**
+**Query:** What type of food does the new restaurant offer?
+**Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto.
+
+**Query:** What topics will the conference cover?
+**Response:** The conference will cover renewable energy, climate change, and sustainability practices.
+
+## [Relevance: 5] (Comprehensive Response with Insights)
+**Definition:** The response not only fully and accurately addresses the question but also includes additional relevant insights or elaboration. It may explain the significance, implications, or provide minor inferences that enhance understanding.
+
+**Examples:**
+**Query:** What type of food does the new restaurant offer?
+**Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto, aiming to provide customers with an authentic Italian dining experience.
+
+**Query:** What topics will the conference cover?
+**Response:** The conference will cover renewable energy, climate change, and sustainability practices, bringing together global experts to discuss these critical issues.
+
+
+
+# Data
+QUERY: {{query}}
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE in relation to the QUERY based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
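
The rewritten prompty asks the model for a ThoughtChain, an Explanation, and a Score wrapped in <S0>/<S1>/<S2> tags instead of a single bare integer, which is also why max_tokens jumps from 1 to 800. The helper parse_quality_evaluator_reason_score imported in the _retrieval.py hunk below presumably extracts the score and reason from this tagged output; the sketch that follows is only an illustration of such parsing, not the SDK's actual implementation, and the function name parse_tagged_output is hypothetical.

```python
# Illustrative sketch only: shows how the <S1>/<S2> tags emitted by the new prompty
# could be turned into a (score, reason) pair. This is NOT the SDK's
# parse_quality_evaluator_reason_score implementation.
import math
import re


def parse_tagged_output(llm_output: str):
    """Extract the <S2> score and <S1> explanation from a tagged LLM response."""
    score = math.nan
    reason = ""
    score_match = re.search(r"<S2>(.*?)</S2>", llm_output, re.DOTALL)
    reason_match = re.search(r"<S1>(.*?)</S1>", llm_output, re.DOTALL)
    if score_match:
        digits = re.findall(r"\d", score_match.group(1))
        if digits:
            score = float(digits[0])
    if reason_match:
        reason = reason_match.group(1).strip()
    return score, reason


# parse_tagged_output("<S0>Let's think step by step: ...</S0><S1>Fully addresses the query.</S1><S2>4</S2>")
# -> (4.0, "Fully addresses the query.")
```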
azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py

@@ -6,16 +6,14 @@ import json
 import logging
 import math
 import os
-import re
-from typing import Union
+from typing import Optional
 
 from promptflow._utils.async_utils import async_run_allowing_running_loop
 from promptflow.core import AsyncPrompty
 
-from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
-
+from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
 from ..._common.math import list_mean_nan_safe
-from ..._common.utils import construct_prompty_model_config, validate_model_config
+from ..._common.utils import construct_prompty_model_config, validate_model_config, parse_quality_evaluator_reason_score
 
 logger = logging.getLogger(__name__)
 
@@ -27,73 +25,85 @@ except ImportError:
 
 class _AsyncRetrievalScoreEvaluator:
     # Constants must be defined within eval's directory to be save/loadable
-    PROMPTY_FILE = "retrieval.prompty"
-    LLM_CALL_TIMEOUT = 600
-    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+    _PROMPTY_FILE = "retrieval.prompty"
+    _LLM_CALL_TIMEOUT = 600
+    _DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
 
-    def __init__(self, model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration]):
+    def __init__(self, model_config: dict):
         prompty_model_config = construct_prompty_model_config(
-            model_config,
-            self.DEFAULT_OPEN_API_VERSION,
+            validate_model_config(model_config),
+            self._DEFAULT_OPEN_API_VERSION,
             USER_AGENT,
         )
 
         current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
         self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
 
-    async def __call__(self, *, conversation, **kwargs):
-        # Extract queries, responses and contexts from conversation
-        queries = []
-        responses = []
-        contexts = []
-
-        for each_turn in conversation:
-            role = each_turn["role"]
-            if role == "user":
-                queries.append(each_turn["content"])
-            elif role == "assistant":
-                responses.append(each_turn["content"])
-                if "context" in each_turn and "citations" in each_turn["context"]:
-                    citations = json.dumps(each_turn["context"]["citations"])
-                    contexts.append(citations)
-
-        # Evaluate each turn
-        per_turn_scores = []
-        history = []
-        for turn_num, query in enumerate(queries):
-            try:
-                query = query if turn_num < len(queries) else ""
-                answer = responses[turn_num] if turn_num < len(responses) else ""
-                context = contexts[turn_num] if turn_num < len(contexts) else ""
-
-                history.append({"user": query, "assistant": answer})
-
-                llm_output = await self._flow(
-                    query=query, history=history, documents=context, timeout=self.LLM_CALL_TIMEOUT, **kwargs
-                )
-                score = math.nan
-                if llm_output:
-                    parsed_score_response = re.findall(r"\d+", llm_output.split("# Result")[-1].strip())
-                    if len(parsed_score_response) > 0:
-                        score = float(parsed_score_response[0].replace("'", "").strip())
-
-                per_turn_scores.append(score)
-
-            except Exception as e:  # pylint: disable=broad-exception-caught
-                logger.warning(
-                    "Evaluator %s failed for turn %s with exception: %s", self.__class__.__name__, turn_num + 1, e
-                )
-
-                per_turn_scores.append(math.nan)
+    async def __call__(self, *, query, context, conversation, **kwargs):
+        if conversation:
+            # Extract queries, responses and contexts from conversation
+            queries = []
+            responses = []
+            contexts = []
+
+            conversation = conversation.get("messages", None)
+
+            for each_turn in conversation:
+                role = each_turn["role"]
+                if role == "user":
+                    queries.append(each_turn["content"])
+                elif role == "assistant":
+                    responses.append(each_turn["content"])
+                    if "context" in each_turn:
+                        if "citations" in each_turn["context"]:
+                            citations = json.dumps(each_turn["context"]["citations"])
+                            contexts.append(citations)
+                        elif isinstance(each_turn["context"], str):
+                            contexts.append(each_turn["context"])
+
+            # Evaluate each turn
+            per_turn_scores = []
+            per_turn_reasons = []
+            for turn_num, turn_query in enumerate(queries):
+                try:
+                    if turn_num >= len(queries):
+                        turn_query = ""
+                    context = contexts[turn_num] if turn_num < len(contexts) else ""
+
+                    llm_output = await self._flow(
+                        query=turn_query, context=context, timeout=self._LLM_CALL_TIMEOUT, **kwargs
+                    )
+                    score, reason = parse_quality_evaluator_reason_score(llm_output)
+                    per_turn_scores.append(score)
+                    per_turn_reasons.append(reason)
+
+                except Exception as e:  # pylint: disable=broad-exception-caught
+                    logger.warning(
+                        "Evaluator %s failed for turn %s with exception: %s", self.__class__.__name__, turn_num + 1, e
+                    )
+
+                    per_turn_scores.append(math.nan)
+                    per_turn_reasons.append("")
+
+            mean_per_turn_score = list_mean_nan_safe(per_turn_scores)
+
+            return {
+                "retrieval": mean_per_turn_score,
+                "gpt_retrieval": mean_per_turn_score,
+                "evaluation_per_turn": {
+                    "gpt_retrieval": per_turn_scores,
+                    "retrieval": per_turn_scores,
+                    "retrieval_reason": per_turn_reasons,
+                },
+            }
+        llm_output = await self._flow(query=query, context=context, timeout=self._LLM_CALL_TIMEOUT, **kwargs)
+        score, reason = parse_quality_evaluator_reason_score(llm_output)
 
         return {
-            "gpt_retrieval": list_mean_nan_safe(per_turn_scores),
-            "evaluation_per_turn": {
-                "gpt_retrieval": {
-                    "score": per_turn_scores,
-                }
-            },
+            "retrieval": score,
+            "retrieval_reason": reason,
+            "gpt_retrieval": score,
         }
 
 
@@ -111,16 +121,16 @@ class RetrievalEvaluator:
 
     .. code-block:: python
 
-        chat_eval = RetrievalScoreEvaluator(model_config)
-        conversation = [
-            {"role": "user", "content": "What is the value of 2 + 2?"},
-            {"role": "assistant", "content": "2 + 2 = 4", "context": {
-                "citations": [
-                    {"id": "math_doc.md", "content": "Information about additions: 1 + 2 = 3, 2 + 2 = 4"}
-                ]
+        chat_eval = RetrievalEvaluator(model_config)
+        conversation = {
+            "messages": [
+                {"role": "user", "content": "What is the value of 2 + 2?"},
+                {
+                    "role": "assistant", "content": "2 + 2 = 4",
+                    "context": "From 'math_doc.md': Information about additions: 1 + 2 = 3, 2 + 2 = 4"
                 }
-            }
-        ]
+            ]
+        }
         result = chat_eval(conversation=conversation)
 
     **Output format**
@@ -128,27 +138,60 @@
     .. code-block:: python
 
         {
-            "gpt_retrieval": 3.0
+            "gpt_retrieval": 3.0,
+            "retrieval": 3.0,
             "evaluation_per_turn": {
-                "gpt_retrieval": {
-                    "score": [1.0, 2.0, 3.0]
-                }
+                "gpt_retrieval": [1.0, 2.0, 3.0],
+                "retrieval": [1.0, 2.0, 3.0],
+                "retrieval_reason": ["<reasoning for score 1>", "<reasoning for score 2>", "<reasoning for score 3>"]
             }
         }
+
+    Note: To align with our support of a diverse set of models, a key without the `gpt_` prefix has been added.
+    To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+    however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """
 
-    def __init__(self, model_config: dict):
-        self._async_evaluator = _AsyncRetrievalScoreEvaluator(validate_model_config(model_config))
+    def __init__(self, model_config):
+        self._async_evaluator = _AsyncRetrievalScoreEvaluator(model_config)
 
-    def __call__(self, *, conversation, **kwargs):
-        """Evaluates retrieval score chat scenario.
+    def __call__(self, *, query: Optional[str] = None, context: Optional[str] = None, conversation=None, **kwargs):
+        """Evaluates retrieval score chat scenario. Accepts either a query and context for a single evaluation,
+        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.
 
+        :keyword query: The query to be evaluated. Mutually exclusive with `conversation` parameter.
+        :paramtype query: Optional[str]
+        :keyword context: The context to be evaluated. Mutually exclusive with `conversation` parameter.
+        :paramtype context: Optional[str]
         :keyword conversation: The conversation to be evaluated.
-        :paramtype conversation: List[Dict]
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
        :return: The scores for Chat scenario.
-        :rtype: dict
+        :rtype: :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
         """
-        return async_run_allowing_running_loop(self._async_evaluator, conversation=conversation, **kwargs)
+        if (query is None or context is None) and conversation is None:
+            msg = "Either a pair of 'query'/'context' or 'conversation' must be provided."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                blame=ErrorBlame.USER_ERROR,
+                category=ErrorCategory.MISSING_FIELD,
+                target=ErrorTarget.RETRIEVAL_EVALUATOR,
+            )
+
+        if (query or context) and conversation:
+            msg = "Either a pair of 'query'/'context' or 'conversation' must be provided, but not both."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                blame=ErrorBlame.USER_ERROR,
+                category=ErrorCategory.INVALID_VALUE,
+                target=ErrorTarget.RETRIEVAL_EVALUATOR,
+            )
+
+        return async_run_allowing_running_loop(
+            self._async_evaluator, query=query, context=context, conversation=conversation, **kwargs
+        )
 
     def _to_async(self):
         return self._async_evaluator
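
Taken together, the retrieval evaluator now accepts either a single query/context pair or a whole conversation. Below is a hedged usage sketch, assuming RetrievalEvaluator remains exported from the package root and that model_config follows the SDK's Azure OpenAI configuration shape; all endpoint, deployment, and key values are placeholders and the shown result values are illustrative.

```python
from azure.ai.evaluation import RetrievalEvaluator

# Placeholder configuration; substitute real endpoint, deployment, and key values.
model_config = {
    "azure_endpoint": "https://<your-endpoint>.openai.azure.com",
    "azure_deployment": "<your-deployment>",
    "api_key": "<your-api-key>",
}

retrieval_eval = RetrievalEvaluator(model_config)

# Single evaluation: pass a query/context pair.
single_result = retrieval_eval(
    query="What is the value of 2 + 2?",
    context="From 'math_doc.md': Information about additions: 1 + 2 = 3, 2 + 2 = 4",
)
# Expected shape (values illustrative):
# {"retrieval": 5.0, "retrieval_reason": "...", "gpt_retrieval": 5.0}

# Multi-turn evaluation: pass a conversation dict; per-turn scores are aggregated.
conversation = {
    "messages": [
        {"role": "user", "content": "What is the value of 2 + 2?"},
        {
            "role": "assistant",
            "content": "2 + 2 = 4",
            "context": "From 'math_doc.md': Information about additions: 1 + 2 = 3, 2 + 2 = 4",
        },
    ]
}
multi_result = retrieval_eval(conversation=conversation)
```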
azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty

@@ -1,10 +1,11 @@
 ---
 name: Retrieval
-description: Evaluates retrieval score for Chat scenario
+description: Evaluates retrieval quality score for RAG scenario
 model:
   api: chat
   parameters:
     temperature: 0.0
+    max_tokens: 1600
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -14,30 +15,79 @@ model:
 inputs:
   query:
     type: string
-  history:
-    type: string
-  documents:
+  context:
     type: string
 
 ---
 system:
-A chat history between user and bot is shown below
-A list of documents is shown below in json format, and each document has one unique id.
-These listed documents are used as context to answer the given question.
-The task is to score the relevance between the documents and the potential answer to the given question in the range of 1 to 5.
-1 means none of the documents is relevant to the question at all. 5 means either one of the document or combination of a few documents is ideal for answering the given question.
-Think through step by step:
-- Summarize each given document first
-- Determine the underlying intent of the given question, when the question is ambiguous, refer to the given chat history
-- Measure how suitable each document to the given question, list the document id and the corresponding relevance score.
-- Summarize the overall relevance of given list of documents to the given question after # Overall Reason, note that the answer to the question can solely from single document or a combination of multiple documents.
-- Finally, output "# Result" followed by a score from 1 to 5.
-
-# Question
-{{ query }}
-# Chat History
-{{ history }}
-# Documents
-===BEGIN RETRIEVED DOCUMENTS===
-{{ documents }}
-===END RETRIEVED DOCUMENTS===
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a list of CONTEXT chunks from a query based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the retrieval quality that is being evaluated to help guide your Score.
+- **Data**: Your input data include QUERY and CONTEXT.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+user:
+# Definition
+**Retrieval** refers to measuring how relevant the context chunks are to address a query and how the most relevant context chunks are surfaced at the top of the list. It emphasizes the extraction and ranking of the most relevant information at the top, without introducing bias from external knowledge and ignoring factual correctness. It assesses the relevance and effectiveness of the retrieved context chunks with respect to the query.
+
+# Ratings
+## [Retrieval: 1] (Irrelevant Context, External Knowledge Bias)
+**Definition:** The retrieved context chunks are not relevant to the query despite any conceptual similarities. There is no overlap between the query and the retrieved information, and no useful chunks appear in the results. They introduce external knowledge that isn't part of the retrieval documents.
+
+**Examples:**
+**Query:** what is kuchen?
+**Context:** ["There's nothing like the taste of a cake you made in your own kitchen. Baking a cake is as simple as measuring ingredients, mixing them in the right order, and remembering to take the cake out of the oven before it burns.", "A steady 325-350 degrees is ideal when it comes to baking pound cake. Position the pan in the middle of the oven, and rotate it once, halfway through the baking time, as it bakes to account for any hot spots. "CHOCOLATE POUND CAKE. Cream butter, sugar ... and floured bundt pan, 10 inch pan or 2 (9x5x3 inch) loaf pans. Bake at ... pans. Bake until cake tester inserted in ... to drizzle down sides. 4. BUTTERMILK LEMON POUND CAKE."", "Pour batter into your pan(s) and place in the oven. Cook for 75 minutes, checking periodically. Some ovens cook unevenly or quickly -- if this describes yours, keep an eye on it. 1 If to be used for fancy ornamented cakes, bake 30 to 35 minutes in a dripping-pan. 2 Insert a skewer or toothpick to see if it's finished.", "As a general rule of thumb you can bake most cakes at 375 degrees Fahrenheit (which is 180 degrees Celsius) and check them after about 30 minutes and expect it to take at least 45 minutes.", "Till a toothpick inserted in the center of the cake comes out clean. Depends on the heat of your oven but start checking at about 45 minutes and when the cake is golden brown. sonnyboy · 8 years ago. Thumbs up.", "1 This results in a pound cake with maximum volume. 2 Be patient. Beat softened butter (and cream cheese or vegetable shortening) at medium speed with an electric mixer until creamy. 3 This can take from 1 to 7 minutes, depending on the power of your mixer."]
+
+**Query:** What are the main economic impacts of global warming?
+**Context:** ["Economic theories such as supply and demand explain how prices fluctuate in a free market.", "Global warming is caused by increased carbon dioxide levels, which affect the environment and the atmosphere.", "Political factors also play a role in economic decisions across nations."]
+
+## [Retrieval: 2] (Partially Relevant Context, Poor Ranking, External Knowledge Bias)
+**Definition:** The context chunks are partially relevant to address the query but are mostly irrelevant, and external knowledge or LLM bias starts influencing the context chunks. The most relevant chunks are either missing or placed at the bottom.
+
+**Examples:**
+**Query:** what is rappelling
+**Context:** ["5. Cancel. Rappelling is the process of coming down from a mountain that is usually done with two pieces of rope. Use a natural anchor or a set of bolts to rappel from with help from an experienced rock climber in this free video on rappelling techniques. Part of the Video Series: Rappelling & Rock Climbing.", "Abseiling (/ˈaebseɪl/ ˈæbseɪl /or/ ; ˈɑːpzaɪl From german, abseilen meaning to rope), down also called, rappelling is the controlled descent of a vertical, drop such as a rock, face using a. Rope climbers use this technique when a cliff or slope is too steep/and or dangerous to descend without. protection", "1. rappel - (mountaineering) a descent of a vertical cliff or wall made by using a doubled rope that is fixed to a higher point and wrapped around the body. abseil. mountain climbing, mountaineering-the activity of climbing a mountain. descent-the act of changing your location in a downward direction."]
+
+**Query:** Describe the causes of the French Revolution.
+**Context:** ["The French Revolution started due to economic disparity, leading to unrest among the lower classes.", "The Industrial Revolution also contributed to changes in society during the 18th century.", "Philosophers like Rousseau inspired revolutionary thinking, but the taxation system played a role as well."]
+
+## [Retrieval: 3] (Relevant Context Ranked Bottom)
+**Definition:** The context chunks contain relevant information to address the query, but the most pertinent chunks are located at the bottom of the list.
+
+**Examples:**
+**Query:** what are monocytes
+**Context:** ["Monocytes are produced by the bone marrow from precursors called monoblasts, bipotent cells that differentiated from hematopoietic stem cells. Monocytes circulate in the bloodstream for about one to three days and then typically move into tissues throughout the body. Monocytes which migrate from the bloodstream to other tissues will then differentiate into tissue resident macrophages or dendritic cells. Macrophages are responsible for protecting tissues from foreign substances, but are also suspected to be important in the formation of important organs like the heart and brain.", "Report Abuse. A high level of monocytes could mean a number of things. They're a type of phagocyte-a type of cell found in your blood that 'eats' many types of attacking bacteria and other microorganisms when it matures. High levels could mean that you have an infection as more develop to fight it.", "Our immune system has a key component called the white blood cells, of which there are several different kinds. Monocytes are a type of white blood cell that fights off bacteria, viruses and fungi. Monocytes are the biggest type of white blood cell in the immune system. Originally formed in the bone marrow, they are released into our blood and tissues. When certain germs enter the body, they quickly rush to the site for attack.", "Monocyte. Monocytes are produced by the bone marrow from stem cell precursors called monoblasts. Monocytes circulate in the bloodstream for about one to three days and then typically move into tissues throughout the body. They make up three to eight percent of the leukocytes in the blood. Monocyte under a light microscope (40x) from a peripheral blood smear surrounded by red blood cells. Monocytes are a type of white blood cell, part of the human body's immune system. They are usually identified in stained smears by their large two-lobed nucleus.", "A monocyte (pictured below) is a large type of white blood cell with one large, smooth, well-defined, indented, slightly folded, oval, kidney-shaped, or notched nucleus (the cell's control center). White blood cells help protect the body against diseases and fight infections.", "Monocytes are white blood cells that are common to the blood of all vertebrates and they help the immune system to function properly. There are a number of reasons for a high monocyte count, which can also be called monocytosis. Some of the reasons can include stress, viral fevers, inflammation and organ necrosis. A physician may order a monocyte blood count test to check for raised levels of monocytes. There are a number of reasons for this test, from a simple health check up to people suffering from heart attacks and leukemia. Complications with the blood and cancer are two other reasons that this test may be performed.", "Monocytes are considered the largest white blood cell. These cells are part of the innate immune system. Monocytes also play important roles in the immune function of the body. These cells are often found when doing a stained smear and appear large kidney shaped. Many of these are found in the spleen area.", "This is taken directly from-http://www.wisegeek.com/what-are-monocytes.htm#. Monocytes are a type of leukocyte or white blood cell which play a role in immune system function. Depending on a patient's level of health, monocytes make up between one and three percent of the total white blood cells in the body. For example, if monocytes are elevated because of an inflammation caused by a viral infection, the patient would be given medication to kill the virus and bring down the inflammation. Typically, when a monocyte count is requested, the lab will also run other tests on the blood to generate a complete picture.", "3D Rendering of a Monocyte. Monocytes are a type of white blood cells (leukocytes). They are the largest of all leukocytes. They are part of the innate immune system of vertebrates including all mammals (humans included), birds, reptiles, and fish. Monocytes which migrate from the bloodstream to other tissues will then differentiate into tissue resident macrophages or dendritic cells. Macrophages are responsible for protecting tissues from foreign substances, but are also suspected to be important in the formation of important organs like the heart and brain."]
+
+**Query:** What were the key features of the Magna Carta?
+**Context:** ["The Magna Carta influenced the legal system in Europe, especially in constitutional law.", "It was signed in 1215 by King John of England to limit the powers of the monarchy.", "The Magna Carta introduced principles like due process and habeas corpus, which are key features of modern legal systems."]
+
+## [Retrieval: 4] (Relevant Context Ranked Middle, No External Knowledge Bias and Factual Accuracy Ignored)
+**Definition:** The context chunks fully address the query, but the most relevant chunk is ranked in the middle of the list. No external knowledge is used to influence the ranking of the chunks; the system only relies on the provided context. Factual accuracy remains out of scope for evaluation.
+
+**Examples:**
+**Query:** do game shows pay their contestants
+**Context:** ["So, in the end, game show winners get some of the money that TV advertisers pay to the networks, who pay the show producers, who then pay the game show winners. Just in the same way that the actors, and crew of a show get paid. Game shows, like other programs, have costs to produce the programs—they have to pay for sets, cameras, talent (the hosts), and also prizes to contestants.", "(Valerie Macon/Getty Images). Oh, happy day! You're a contestant on a popular game show—The Price Is Right, let's say. You spin the wheel, you make the winning bid, and suddenly—ka-ching!—you've won the Lexus or the dishwasher or the lifetime supply of nail clippers.", "1 If you can use most of the prizes the show offers, such as a new car or trip, you may be content to appear on a game show that features material prizes. 2 If not, you should probably try out for a show where cash is the main prize. 3 In the United States, game show contestants must pay taxes on any prizes they win. 2. Meet the eligibility requirements. All game shows have certain eligibility requirements for their contestants. Generally, you must be at least 18 years of age, except for those shows that use child or teenage contestants, and you are allowed to appear on no more than 1 game show per year.", "Rating Newest Oldest. Best Answer: You don't always win the money amount on the front of your lectern when you are on a game show. As someone else said, 2nd place earns $2000 and 3rd place earns $1000 in Jeopardy! In any case, the prize money is paid out from the ad revenue that the show receives from sponsors. I think in this case Who Wants to be a Millionaire or Deal or No Deal is the best example of how shows can be successful while still paying the prize money. I feel this way because these shows have a potential, however small it may be, to pay out 1 million dollars to every contestant on the show. Here is the reality. Regardless of the show whether it be a game show or a drama, a network will receive money from commercial advertising based on the viewership. With this in mind a game show costs very little to actually air compared to a full production drama series, that's where the prize money comes from"]
+
+## [Retrieval: 5] (Highly Relevant, Well Ranked, No Bias Introduced)
+**Definition:** The context chunks not only fully address the query, but also surface the most relevant chunks at the top of the list. The retrieval respects the internal context, avoids relying on any outside knowledge, and focuses solely on pulling the most useful content to the forefront, irrespective of the factual correctness of the information.
+
+**Examples:**
+**Query:** The smallest blood vessels in your body, where gas exchange occurs are called
+**Context:** ["Gas exchange is the delivery of oxygen from the lungs to the bloodstream, and the elimination of carbon dioxide from the bloodstream to the lungs. It occurs in the lungs between the alveoli and a network of tiny blood vessels called capillaries, which are located in the walls of the alveoli. The walls of the alveoli actually share a membrane with the capillaries in which oxygen and carbon dioxide move freely between the respiratory system and the bloodstream.", "Arterioles branch into capillaries, the smallest of all blood vessels. Capillaries are the sites of nutrient and waste exchange between the blood and body cells. Capillaries are microscopic vessels that join the arterial system with the venous system.", "Arterioles are the smallest arteries and regulate blood flow into capillary beds through vasoconstriction and vasodilation. Capillaries are the smallest vessels and allow for exchange of substances between the blood and interstitial fluid. Continuous capillaries are most common and allow passage of fluids and small solutes. Fenestrated capillaries are more permeable to fluids and solutes than continuous capillaries.", "Tweet. The smallest blood vessels in the human body are capillaries. They are responsible for the absorption of oxygen into the blood stream and for removing the deoxygenated red blood cells for return to the heart and lungs for reoxygenation.", "2. Capillaries—these are the sites of gas exchange between the tissues. 3. Veins—these return oxygen poor blood to the heart, except for the vein that carries blood from the lungs. On the right is a diagram showing how the three connect. Notice the artery and vein are much larger than the capillaries.", "Gas exchange occurs in the capillaries which are the smallest blood vessels in the body. Each artery that comes from the heart is surrounded by capillaries so that they can take it to the various parts of the body."]
+
+
+# Data
+QUERY: {{query}}
+CONTEXT: {{context}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous CONTEXT in relation to the QUERY based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
azure/ai/evaluation/_evaluators/_rouge/_rouge.py

@@ -4,8 +4,8 @@
 from enum import Enum
 
 from promptflow._utils.async_utils import async_run_allowing_running_loop
-from azure.ai.evaluation._vendor.rouge_score import rouge_scorer
 
+from azure.ai.evaluation._vendor.rouge_score import rouge_scorer
 from azure.core import CaseInsensitiveEnumMeta
 
 
@@ -89,7 +89,7 @@ class RougeScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The ROUGE score.
-        :rtype: dict
+        :rtype: Dict[str, float]
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
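
The tightened :rtype: documents that the ROUGE evaluator returns a flat mapping of metric names to floats. A short usage sketch follows, assuming RougeScoreEvaluator and RougeType stay exported from the package root; the exact output key names are not shown in this diff and should be confirmed against the installed version.

```python
from azure.ai.evaluation import RougeScoreEvaluator, RougeType

# Compute ROUGE-L between a generated response and its ground truth.
rouge_eval = RougeScoreEvaluator(rouge_type=RougeType.ROUGE_L)
scores = rouge_eval(
    response="Paris is the capital of France.",
    ground_truth="The capital of France is Paris.",
)
# Returns a Dict[str, float], e.g. precision/recall/F1 style keys (illustrative):
# {"rouge_precision": 0.67, "rouge_recall": 0.67, "rouge_f1_score": 0.67}
```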
azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py (new file)

@@ -0,0 +1,9 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from ._service_groundedness import GroundednessProEvaluator
+
+__all__ = [
+    "GroundednessProEvaluator",
+]
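
The new GroundednessProEvaluator is a service-backed evaluator, so the sketch below assumes it follows the azure_ai_project/credential pattern used by the other RAI-service evaluators in this package and that it is re-exported from the package root; the exact call signature and output keys are not shown in this diff, and every value below is a placeholder.

```python
from azure.ai.evaluation import GroundednessProEvaluator
from azure.identity import DefaultAzureCredential

# Placeholder project details; the required shape is assumed, not confirmed by this diff.
azure_ai_project = {
    "subscription_id": "<subscription-id>",
    "resource_group_name": "<resource-group>",
    "project_name": "<project-name>",
}

groundedness_pro = GroundednessProEvaluator(
    azure_ai_project=azure_ai_project,
    credential=DefaultAzureCredential(),
)

result = groundedness_pro(
    query="What is the capital of France?",
    context="France's capital city is Paris.",
    response="Paris is the capital of France.",
)
```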