azure-ai-evaluation 1.0.0b4__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. azure/ai/evaluation/__init__.py +22 -0
  2. azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +4 -0
  3. azure/ai/evaluation/_common/constants.py +5 -0
  4. azure/ai/evaluation/_common/math.py +73 -2
  5. azure/ai/evaluation/_common/rai_service.py +250 -62
  6. azure/ai/evaluation/_common/utils.py +196 -23
  7. azure/ai/evaluation/_constants.py +7 -6
  8. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
  9. azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +13 -4
  10. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +19 -6
  11. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
  12. azure/ai/evaluation/_evaluate/_eval_run.py +55 -14
  13. azure/ai/evaluation/_evaluate/_evaluate.py +312 -228
  14. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +7 -6
  15. azure/ai/evaluation/_evaluate/_utils.py +46 -11
  16. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +17 -18
  17. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +67 -31
  18. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
  19. azure/ai/evaluation/_evaluators/_common/_base_eval.py +37 -24
  20. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +21 -9
  21. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +52 -16
  22. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +91 -48
  23. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +100 -26
  24. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +94 -26
  25. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +96 -26
  26. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +97 -26
  27. azure/ai/evaluation/_evaluators/_eci/_eci.py +31 -4
  28. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
  29. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +67 -36
  30. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
  31. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +14 -16
  32. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +106 -34
  33. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
  34. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
  35. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +20 -27
  36. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
  37. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
  38. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
  39. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
  40. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
  41. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
  42. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
  43. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
  44. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +87 -31
  45. azure/ai/evaluation/_evaluators/_qa/_qa.py +23 -31
  46. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +72 -36
  47. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
  48. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +83 -125
  49. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
  50. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +26 -27
  51. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  52. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
  53. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +37 -28
  54. azure/ai/evaluation/_evaluators/_xpia/xpia.py +94 -33
  55. azure/ai/evaluation/_exceptions.py +19 -0
  56. azure/ai/evaluation/_model_configurations.py +83 -15
  57. azure/ai/evaluation/_version.py +1 -1
  58. azure/ai/evaluation/simulator/__init__.py +2 -1
  59. azure/ai/evaluation/simulator/_adversarial_scenario.py +20 -1
  60. azure/ai/evaluation/simulator/_adversarial_simulator.py +29 -35
  61. azure/ai/evaluation/simulator/_constants.py +11 -1
  62. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  63. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  64. azure/ai/evaluation/simulator/_direct_attack_simulator.py +17 -9
  65. azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
  66. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +22 -1
  67. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +90 -35
  68. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +4 -2
  69. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
  70. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -4
  71. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -1
  72. azure/ai/evaluation/simulator/_simulator.py +165 -105
  73. azure/ai/evaluation/simulator/_utils.py +31 -13
  74. azure_ai_evaluation-1.0.1.dist-info/METADATA +600 -0
  75. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/NOTICE.txt +20 -0
  76. azure_ai_evaluation-1.0.1.dist-info/RECORD +119 -0
  77. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/WHEEL +1 -1
  78. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
  79. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
  80. azure_ai_evaluation-1.0.0b4.dist-info/METADATA +0 -535
  81. azure_ai_evaluation-1.0.0b4.dist-info/RECORD +0 -106
  82. /azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +0 -0
  83. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_fluency/_fluency.py
@@ -3,71 +3,102 @@
 # ---------------------------------------------------------

 import os
-from typing import Optional
+from typing import Dict, List, Union

-from typing_extensions import override
+from typing_extensions import overload, override

 from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
+from azure.ai.evaluation._model_configurations import Conversation


-class FluencyEvaluator(PromptyEvaluatorBase):
+class FluencyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
     """
-    Initialize a fluency evaluator configured for a specific Azure OpenAI model.
+    Evaluates the fluency of a given response or a multi-turn conversation, including reasoning.
+
+    The fluency measure assesses the extent to which the generated text conforms to grammatical rules, syntactic
+    structures, and appropriate vocabulary usage, resulting in linguistically correct responses.
+
+    Fluency scores range from 1 to 5, with 1 being the least fluent and 5 being the most fluent.

     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
         ~azure.ai.evaluation.OpenAIModelConfiguration]

-    **Usage**
-
-    .. code-block:: python
+    .. admonition:: Example:

-        eval_fn = FluencyEvaluator(model_config)
-        result = eval_fn(
-            query="What is the capital of Japan?",
-            response="The capital of Japan is Tokyo.")
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START fluency_evaluator]
+            :end-before: [END fluency_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a FluencyEvaluator.

-    **Output format**
+    .. note::

-    .. code-block:: python
-
-        {
-            "gpt_fluency": 4.0
-        }
+        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """

-    PROMPTY_FILE = "fluency.prompty"
-    RESULT_KEY = "gpt_fluency"
+    _PROMPTY_FILE = "fluency.prompty"
+    _RESULT_KEY = "fluency"
+
+    id = "azureml://registries/azureml/models/Fluency-Evaluator/versions/4"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""

     @override
-    def __init__(self, model_config: dict):
+    def __init__(self, model_config):
         current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
-        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self.RESULT_KEY)
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)

-    @override
+    @overload
+    def __call__(
+        self,
+        *,
+        response: str,
+    ) -> Dict[str, Union[str, float]]:
+        """Evaluate fluency in given response
+
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :return: The fluency score
+        :rtype: Dict[str, float]
+        """
+
+    @overload
     def __call__(
         self,
         *,
-        query: Optional[str] = None,
-        response: Optional[str] = None,
-        conversation: Optional[dict] = None,
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+        """Evaluate fluency for a conversation
+
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The fluency score
+        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+        """
+
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
         **kwargs,
     ):
         """
-        Evaluate fluency. Accepts either a query and response for a single evaluation,
+        Evaluate fluency. Accepts either a response for a single evaluation,
         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
         the evaluator will aggregate the results of each turn.

-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
+        :keyword response: The response to be evaluated. Mutually exclusive with the "conversation" parameter.
+        :paramtype response: Optional[str]
         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages". Conversation turns are expected
-            to be dictionaries with keys "content" and "role".
-        :paramtype conversation: Optional[Dict]
+            key "messages". Conversation turns are expected to be dictionaries with keys "content" and "role".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The fluency score.
-        :rtype: Dict[str, float]
+        :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
         """
-        return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
+        return super().__call__(*args, **kwargs)
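
For orientation, here is a minimal sketch of the call patterns this hunk introduces. It is not the package's samples file that the literalinclude directive points to; the environment-variable names and values are placeholders:

    import os
    from azure.ai.evaluation import FluencyEvaluator

    # Placeholder Azure OpenAI configuration; substitute real endpoint/deployment values.
    model_config = {
        "azure_endpoint": os.environ["AZURE_OPENAI_ENDPOINT"],
        "api_key": os.environ["AZURE_OPENAI_API_KEY"],
        "azure_deployment": os.environ["AZURE_OPENAI_DEPLOYMENT"],
    }

    fluency = FluencyEvaluator(model_config)

    # Single evaluation: the new overload takes only `response` (the old `query` keyword is gone).
    result = fluency(response="The capital of Japan is Tokyo.")
    print(result["fluency"])      # new result key
    print(result["gpt_fluency"])  # legacy gpt_-prefixed key, kept for backwards compatibility

    # Multi-turn evaluation via the `conversation` keyword; per-turn results are aggregated.
    conversation = {
        "messages": [
            {"role": "user", "content": "What is the capital of Japan?"},
            {"role": "assistant", "content": "The capital of Japan is Tokyo."},
        ]
    }
    aggregated = fluency(conversation=conversation)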
azure/ai/evaluation/_evaluators/_fluency/fluency.prompty
@@ -5,7 +5,7 @@ model:
   api: chat
   parameters:
     temperature: 0.0
-    max_tokens: 1
+    max_tokens: 800
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -13,44 +13,74 @@ model:
       type: text

 inputs:
-  query:
-    type: string
   response:
     type: string

 ---
 system:
-You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. You should return a single integer value between 1 to 5 representing the evaluation metric. You will include no other text or information.
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include a RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
 user:
-Fluency measures the quality of individual sentences in the answer, and whether they are well-written and grammatically correct. Consider the quality of individual sentences when evaluating fluency. Given the question and answer, score the fluency of the answer between one to five stars using the following rating scale:
-One star: the answer completely lacks fluency
-Two stars: the answer mostly lacks fluency
-Three stars: the answer is partially fluent
-Four stars: the answer is mostly fluent
-Five stars: the answer has perfect fluency
-
-This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
-
-question: What did you have for breakfast today?
-answer: Breakfast today, me eating cereal and orange juice very good.
-stars: 1
-
-question: How do you feel when you travel alone?
-answer: Alone travel, nervous, but excited also. I feel adventure and like its time.
-stars: 2
-
-question: When was the last time you went on a family vacation?
-answer: Last family vacation, it took place in last summer. We traveled to a beach destination, very fun.
-stars: 3
-
-question: What is your favorite thing about your job?
-answer: My favorite aspect of my job is the chance to interact with diverse people. I am constantly learning from their experiences and stories.
-stars: 4
-
-question: Can you describe your morning routine?
-answer: Every morning, I wake up at 6 am, drink a glass of water, and do some light stretching. After that, I take a shower and get dressed for work. Then, I have a healthy breakfast, usually consisting of oatmeal and fruits, before leaving the house around 7:30 am.
-stars: 5
-
-question: {{query}}
-answer: {{response}}
-stars:
+# Definition
+**Fluency** refers to the effectiveness and clarity of written communication, focusing on grammatical accuracy, vocabulary range, sentence complexity, coherence, and overall readability. It assesses how smoothly ideas are conveyed and how easily the text can be understood by the reader.
+
+# Ratings
+## [Fluency: 1] (Emergent Fluency)
+**Definition:** The response shows minimal command of the language. It contains pervasive grammatical errors, extremely limited vocabulary, and fragmented or incoherent sentences. The message is largely incomprehensible, making understanding very difficult.
+
+**Examples:**
+**Response:** Free time I. Go park. Not fun. Alone.
+
+**Response:** Like food pizza. Good cheese eat.
+
+## [Fluency: 2] (Basic Fluency)
+**Definition:** The response communicates simple ideas but has frequent grammatical errors and limited vocabulary. Sentences are short and may be improperly constructed, leading to partial understanding. Repetition and awkward phrasing are common.
+
+**Examples:**
+**Response:** I like play soccer. I watch movie. It fun.
+
+**Response:** My town small. Many people. We have market.
+
+## [Fluency: 3] (Competent Fluency)
+**Definition:** The response clearly conveys ideas with occasional grammatical errors. Vocabulary is adequate but not extensive. Sentences are generally correct but may lack complexity and variety. The text is coherent, and the message is easily understood with minimal effort.
+
+**Examples:**
+**Response:** I'm planning to visit friends and maybe see a movie together.
+
+**Response:** I try to eat healthy food and exercise regularly by jogging.
+
+## [Fluency: 4] (Proficient Fluency)
+**Definition:** The response is well-articulated with good control of grammar and a varied vocabulary. Sentences are complex and well-structured, demonstrating coherence and cohesion. Minor errors may occur but do not affect overall understanding. The text flows smoothly, and ideas are connected logically.
+
+**Examples:**
+**Response:** My interest in mathematics and problem-solving inspired me to become an engineer, as I enjoy designing solutions that improve people's lives.
+
+**Response:** Environmental conservation is crucial because it protects ecosystems, preserves biodiversity, and ensures natural resources are available for future generations.
+
+## [Fluency: 5] (Exceptional Fluency)
+**Definition:** The response demonstrates an exceptional command of language with sophisticated vocabulary and complex, varied sentence structures. It is coherent, cohesive, and engaging, with precise and nuanced expression. Grammar is flawless, and the text reflects a high level of eloquence and style.
+
+**Examples:**
+**Response:** Globalization exerts a profound influence on cultural diversity by facilitating unprecedented cultural exchange while simultaneously risking the homogenization of distinct cultural identities, which can diminish the richness of global heritage.
+
+**Response:** Technology revolutionizes modern education by providing interactive learning platforms, enabling personalized learning experiences, and connecting students worldwide, thereby transforming how knowledge is acquired and shared.
+
+
+# Data
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
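
The rewritten template asks the model for a chain of thought, an explanation, and a score wrapped in <S0>/<S1>/<S2> tags, which is why max_tokens rises from 1 to 800. A rough illustration of pulling the integer score out of such a reply; this is not the package's own parsing code, just a sketch of the tag format:

    import re

    # Hypothetical raw completion following the tag format requested above.
    raw = "<S0>Let's think step by step: ...</S0><S1>Clear, varied sentences.</S1><S2>4</S2>"

    match = re.search(r"<S2>\s*(\d)\s*</S2>", raw)
    score = int(match.group(1)) if match else None  # -> 4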
azure/ai/evaluation/_evaluators/_gleu/_gleu.py
@@ -24,31 +24,29 @@ class _AsyncGleuScoreEvaluator:

 class GleuScoreEvaluator:
     """
-    Evaluator that computes the BLEU Score between two strings.
+    Calculates the GLEU (Google-BLEU) score between a response and the ground truth.

     The GLEU (Google-BLEU) score evaluator measures the similarity between generated and reference texts by
     evaluating n-gram overlap, considering both precision and recall. This balanced evaluation, designed for
     sentence-level assessment, makes it ideal for detailed analysis of translation quality. GLEU is well-suited for
     use cases such as machine translation, text summarization, and text generation.

-    **Usage**
+    GLEU scores range from 0 to 1, where a value of 1 represents perfect overlap between the response and
+    the ground truth and a value of 0 indicates no overlap.

-    .. code-block:: python
+    .. admonition:: Example:

-        eval_fn = GleuScoreEvaluator()
-        result = eval_fn(
-            response="Tokyo is the capital of Japan.",
-            ground_truth="The capital of Japan is Tokyo.")
-
-    **Output format**
-
-    .. code-block:: python
-
-        {
-            "gleu_score": 0.41
-        }
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START gleu_score_evaluator]
+            :end-before: [END gleu_score_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a GleuScoreEvaluator.
     """

+    id = "azureml://registries/azureml/models/Gleu-Score-Evaluator/versions/3"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
     def __init__(self):
         self._async_evaluator = _AsyncGleuScoreEvaluator()

@@ -61,7 +59,7 @@ class GleuScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The GLEU score.
-        :rtype: dict
+        :rtype: Dict[str, float]
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
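
A small usage sketch, reusing the example values from the docstring that this hunk removes:

    from azure.ai.evaluation import GleuScoreEvaluator

    gleu = GleuScoreEvaluator()
    result = gleu(
        response="Tokyo is the capital of Japan.",
        ground_truth="The capital of Japan is Tokyo.",
    )
    # Returns a Dict[str, float], e.g. {"gleu_score": 0.41} per the removed docstring example.
    print(result["gleu_score"])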
azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py
@@ -2,71 +2,143 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
 import os
-from typing import Optional
+from typing import Dict, List, Optional, Union

-from typing_extensions import override
+from typing_extensions import overload, override
+from promptflow.core import AsyncPrompty

 from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
+from azure.ai.evaluation._model_configurations import Conversation
+from ..._common.utils import construct_prompty_model_config, validate_model_config

+try:
+    from ..._user_agent import USER_AGENT
+except ImportError:
+    USER_AGENT = "None"

-class GroundednessEvaluator(PromptyEvaluatorBase):
+
+class GroundednessEvaluator(PromptyEvaluatorBase[Union[str, float]]):
     """
-    Initialize a groundedness evaluator configured for a specific Azure OpenAI model.
+    Evaluates groundedness score for a given query (optional), response, and context or a multi-turn conversation,
+    including reasoning.

-    :param model_config: Configuration for the Azure OpenAI model.
-    :type model_config: Union[~azure.ai.evalation.AzureOpenAIModelConfiguration,
-        ~azure.ai.evalation.OpenAIModelConfiguration]
+    The groundedness measure assesses the correspondence between claims in an AI-generated answer and the source
+    context, making sure that these claims are substantiated by the context. Even if the responses from LLM are
+    factually correct, they'll be considered ungrounded if they can't be verified against the provided sources
+    (such as your input source or your database). Use the groundedness metric when you need to verify that
+    AI-generated responses align with and are validated by the provided context.

-    **Usage**
+    Groundedness scores range from 1 to 5, with 1 being the least grounded and 5 being the most grounded.

-    .. code-block:: python
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+        ~azure.ai.evaluation.OpenAIModelConfiguration]

-        eval_fn = GroundednessEvaluator(model_config)
-        result = eval_fn(
-            response="The capital of Japan is Tokyo.",
-            context="Tokyo is Japan's capital, known for its blend of traditional culture \
-                and technological advancements.")
+    .. admonition:: Example:

-    **Output format**
+        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+            :start-after: [START groundedness_evaluator]
+            :end-before: [END groundedness_evaluator]
+            :language: python
+            :dedent: 8
+            :caption: Initialize and call a GroundednessEvaluator.

-    .. code-block:: python
+    .. note::

-        {
-            "gpt_groundedness": 5
-        }
+        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
     """

-    PROMPTY_FILE = "groundedness.prompty"
-    RESULT_KEY = "gpt_groundedness"
+    _PROMPTY_FILE_NO_QUERY = "groundedness_without_query.prompty"
+    _PROMPTY_FILE_WITH_QUERY = "groundedness_with_query.prompty"
+    _RESULT_KEY = "groundedness"
+    _OPTIONAL_PARAMS = ["query"]
+
+    id = "azureml://registries/azureml/models/Groundedness-Evaluator/versions/4"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""

     @override
-    def __init__(self, model_config: dict):
+    def __init__(self, model_config):
         current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
-        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self.RESULT_KEY)
+        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_NO_QUERY)  # Default to no query

-    @override
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+        self._model_config = model_config
+        # Needs to be set because it's used in call method to re-validate prompt if `query` is provided
+
+    @overload
+    def __call__(
+        self,
+        *,
+        response: str,
+        context: str,
+        query: Optional[str] = None,
+    ) -> Dict[str, Union[str, float]]:
+        """Evaluate groundedness for given input of response, context
+
+        :keyword response: The response to be evaluated.
+        :paramtype response: str
+        :keyword context: The context to be evaluated.
+        :paramtype context: str
+        :keyword query: The query to be evaluated. Optional parameter for use with the `response`
+            and `context` parameters. If provided, a different prompt template will be used for evaluation.
+        :paramtype query: Optional[str]
+        :return: The groundedness score.
+        :rtype: Dict[str, float]
+        """
+
+    @overload
     def __call__(
         self,
         *,
-        response: Optional[str] = None,
-        context: Optional[str] = None,
-        conversation: Optional[dict] = None,
+        conversation: Conversation,
+    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+        """Evaluate groundedness for a conversation
+
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+        :return: The groundedness score.
+        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+        """
+
+    @override
+    def __call__(  # pylint: disable=docstring-missing-param
+        self,
+        *args,
         **kwargs,
     ):
-        """Evaluate groundedless. Accepts either a response and context a single evaluation,
+        """Evaluate groundedness. Accepts either a query, response, and context for a single evaluation,
         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
         the evaluator will aggregate the results of each turn.

-        :keyword response: The response to be evaluated.
+        :keyword query: The query to be evaluated. Mutually exclusive with `conversation`. Optional parameter for use
+            with the `response` and `context` parameters. If provided, a different prompt template will be used for
+            evaluation.
+        :paramtype query: Optional[str]
+        :keyword response: The response to be evaluated. Mutually exclusive with the `conversation` parameter.
         :paramtype response: Optional[str]
-        :keyword context: The context to be evaluated.
+        :keyword context: The context to be evaluated. Mutually exclusive with the `conversation` parameter.
         :paramtype context: Optional[str]
         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
             key "messages", and potentially a global context under the key "context". Conversation turns are expected
             to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[Dict]
+        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The relevance score.
-        :rtype: Dict[str, float]
+        :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
         """
-        return super().__call__(response=response, context=context, conversation=conversation, **kwargs)
+
+        if kwargs.get("query", None):
+            current_dir = os.path.dirname(__file__)
+            prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_WITH_QUERY)
+            self._prompty_file = prompty_path
+            prompty_model_config = construct_prompty_model_config(
+                validate_model_config(self._model_config),
+                self._DEFAULT_OPEN_API_VERSION,
+                USER_AGENT,
+            )
+            self._flow = AsyncPrompty.load(source=self._prompty_file, model=prompty_model_config)
+
+        return super().__call__(*args, **kwargs)
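
A minimal sketch of the updated call patterns, assuming a `model_config` like the one in the fluency sketch above; the response/context strings are placeholders taken from the removed docstring example:

    from azure.ai.evaluation import GroundednessEvaluator

    groundedness = GroundednessEvaluator(model_config)

    # Without `query`: groundedness_without_query.prompty (the default) is used.
    result = groundedness(
        response="The capital of Japan is Tokyo.",
        context="Tokyo is Japan's capital, known for its blend of traditional culture and technological advancements.",
    )

    # With `query`: the evaluator reloads its flow with groundedness_with_query.prompty.
    result_with_query = groundedness(
        query="What is the capital of Japan?",
        response="The capital of Japan is Tokyo.",
        context="Tokyo is Japan's capital.",
    )
    print(result_with_query["groundedness"], result_with_query["gpt_groundedness"])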
azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty (new file)
@@ -0,0 +1,113 @@
+---
+name: Groundedness
+description: Evaluates groundedness score for RAG scenario
+model:
+  api: chat
+  parameters:
+    temperature: 0.0
+    max_tokens: 800
+    top_p: 1.0
+    presence_penalty: 0
+    frequency_penalty: 0
+    response_format:
+      type: text
+
+inputs:
+  query:
+    type: string
+  response:
+    type: string
+  context:
+    type: string
+
+
+---
+system:
+# Instruction
+## Goal
+### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+- **Data**: Your input data include CONTEXT, QUERY, and RESPONSE.
+- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+user:
+# Definition
+**Groundedness** refers to how well an answer is anchored in the provided context, evaluating its relevance, accuracy, and completeness based exclusively on that context. It assesses the extent to which the answer directly and fully addresses the question without introducing unrelated or incorrect information. The scale ranges from 1 to 5, with higher numbers indicating greater groundedness.
+
+# Ratings
+## [Groundedness: 1] (Completely Unrelated Response)
+**Definition:** An answer that does not relate to the question or the context in any way. It fails to address the topic, provides irrelevant information, or introduces completely unrelated subjects.
+
+**Examples:**
+**Context:** The company's annual meeting will be held next Thursday.
+**Query:** When is the company's annual meeting?
+**Response:** I enjoy hiking in the mountains during summer.
+
+**Context:** The new policy aims to reduce carbon emissions by 20% over the next five years.
+**Query:** What is the goal of the new policy?
+**Response:** My favorite color is blue.
+
+## [Groundedness: 2] (Related Topic but Does Not Respond to the Query)
+**Definition:** An answer that relates to the general topic of the context but does not answer the specific question asked. It may mention concepts from the context but fails to provide a direct or relevant response.
+
+**Examples:**
+**Context:** The museum will exhibit modern art pieces from various local artists.
+**Query:** What kind of art will be exhibited at the museum?
+**Response:** Museums are important cultural institutions.
+
+**Context:** The new software update improves battery life and performance.
+**Query:** What does the new software update improve?
+**Response:** Software updates can sometimes fix bugs.
+
+## [Groundedness: 3] (Attempts to Respond but Contains Incorrect Information)
+**Definition:** An answer that attempts to respond to the question but includes incorrect information not supported by the context. It may misstate facts, misinterpret the context, or provide erroneous details.
+
+**Examples:**
+**Context:** The festival starts on June 5th and features international musicians.
+**Query:** When does the festival start?
+**Response:** The festival starts on July 5th and features local artists.
+
+**Context:** The recipe requires two eggs and one cup of milk.
+**Query:** How many eggs are needed for the recipe?
+**Response:** You need three eggs for the recipe.
+
+## [Groundedness: 4] (Partially Correct Response)
+**Definition:** An answer that provides a correct response to the question but is incomplete or lacks specific details mentioned in the context. It captures some of the necessary information but omits key elements needed for a full understanding.
+
+**Examples:**
+**Context:** The bookstore offers a 15% discount to students and a 10% discount to senior citizens.
+**Query:** What discount does the bookstore offer to students?
+**Response:** Students get a discount at the bookstore.
+
+**Context:** The company's headquarters are located in Berlin, Germany.
+**Query:** Where are the company's headquarters?
+**Response:** The company's headquarters are in Germany.
+
+## [Groundedness: 5] (Fully Correct and Complete Response)
+**Definition:** An answer that thoroughly and accurately responds to the question, including all relevant details from the context. It directly addresses the question with precise information, demonstrating complete understanding without adding extraneous information.
+
+**Examples:**
+**Context:** The author released her latest novel, 'The Silent Echo', on September 1st.
+**Query:** When was 'The Silent Echo' released?
+**Response:** 'The Silent Echo' was released on September 1st.
+
+**Context:** Participants must register by May 31st to be eligible for early bird pricing.
+**Query:** By what date must participants register to receive early bird pricing?
+**Response:** Participants must register by May 31st to receive early bird pricing.
+
+
+# Data
+CONTEXT: {{context}}
+QUERY: {{query}}
+RESPONSE: {{response}}
+
+
+# Tasks
+## Please provide your assessment Score for the previous RESPONSE in relation to the CONTEXT and QUERY based on the Definitions above. Your output should include the following information:
+- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+- **Explanation**: a very short explanation of why you think the input Data should get that Score.
+- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+# Output
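
For completeness, a sketch of exercising this new template directly through promptflow, mirroring the AsyncPrompty.load call in the _groundedness.py hunk above. The model-override shape and credential values are assumptions, not taken from this diff:

    import asyncio
    from promptflow.core import AsyncPrompty

    # Assumed override shape for a connection-free Azure OpenAI chat deployment.
    prompty_model_config = {
        "configuration": {
            "type": "azure_openai",
            "azure_endpoint": "<endpoint>",
            "api_key": "<key>",
            "azure_deployment": "<deployment>",
        }
    }

    flow = AsyncPrompty.load(source="groundedness_with_query.prompty", model=prompty_model_config)
    raw = asyncio.run(
        flow(
            context="The company's annual meeting will be held next Thursday.",
            query="When is the company's annual meeting?",
            response="The annual meeting is next Thursday.",
        )
    )
    # `raw` is the tagged text (<S0>...</S0><S1>...</S1><S2>...</S2>) requested by the template.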