azure-ai-evaluation 1.0.0__py3-none-any.whl → 1.0.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries. Because the comparison runs from 1.0.0 to 1.0.0b2, lines prefixed with "-" in the hunks below come from the 1.0.0 wheel and lines prefixed with "+" come from the 1.0.0b2 wheel.

Potentially problematic release: this version of azure-ai-evaluation has been flagged as possibly problematic.

Files changed (105)
  1. azure/ai/evaluation/__init__.py +5 -31
  2. azure/ai/evaluation/_common/constants.py +2 -9
  3. azure/ai/evaluation/_common/rai_service.py +120 -300
  4. azure/ai/evaluation/_common/utils.py +23 -381
  5. azure/ai/evaluation/_constants.py +6 -19
  6. azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/__init__.py +2 -3
  7. azure/ai/evaluation/_evaluate/{_batch_run/eval_run_context.py → _batch_run_client/batch_run_context.py} +7 -23
  8. azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/code_client.py +17 -33
  9. azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/proxy_client.py +4 -32
  10. azure/ai/evaluation/_evaluate/_eval_run.py +24 -81
  11. azure/ai/evaluation/_evaluate/_evaluate.py +239 -393
  12. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +17 -17
  13. azure/ai/evaluation/_evaluate/_utils.py +28 -82
  14. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +18 -17
  15. azure/ai/evaluation/_evaluators/{_retrieval → _chat}/__init__.py +2 -2
  16. azure/ai/evaluation/_evaluators/_chat/_chat.py +357 -0
  17. azure/ai/evaluation/_evaluators/{_service_groundedness → _chat/retrieval}/__init__.py +2 -2
  18. azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +157 -0
  19. azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +48 -0
  20. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +88 -78
  21. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +39 -76
  22. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +4 -0
  23. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +67 -105
  24. azure/ai/evaluation/_evaluators/{_multimodal/_content_safety_multimodal_base.py → _content_safety/_content_safety_base.py} +34 -24
  25. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +301 -0
  26. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +54 -105
  27. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +52 -99
  28. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +52 -101
  29. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +51 -101
  30. azure/ai/evaluation/_evaluators/_eci/_eci.py +54 -44
  31. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +19 -34
  32. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +89 -76
  33. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +41 -66
  34. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +16 -14
  35. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +87 -113
  36. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +54 -0
  37. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +27 -20
  38. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +80 -89
  39. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +5 -0
  40. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +104 -0
  41. azure/ai/evaluation/_evaluators/_qa/_qa.py +30 -23
  42. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +96 -84
  43. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +47 -78
  44. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +27 -26
  45. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +38 -53
  46. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +5 -0
  47. azure/ai/evaluation/_evaluators/_xpia/xpia.py +105 -91
  48. azure/ai/evaluation/_exceptions.py +7 -28
  49. azure/ai/evaluation/_http_utils.py +132 -203
  50. azure/ai/evaluation/_model_configurations.py +8 -104
  51. azure/ai/evaluation/_version.py +1 -1
  52. azure/ai/evaluation/simulator/__init__.py +1 -2
  53. azure/ai/evaluation/simulator/_adversarial_scenario.py +1 -20
  54. azure/ai/evaluation/simulator/_adversarial_simulator.py +92 -111
  55. azure/ai/evaluation/simulator/_constants.py +1 -11
  56. azure/ai/evaluation/simulator/_conversation/__init__.py +12 -13
  57. azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -4
  58. azure/ai/evaluation/simulator/_direct_attack_simulator.py +67 -33
  59. azure/ai/evaluation/simulator/_helpers/__init__.py +2 -1
  60. azure/ai/evaluation/{_common → simulator/_helpers}/_experimental.py +9 -24
  61. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +5 -26
  62. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +94 -107
  63. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +22 -70
  64. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +11 -28
  65. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +4 -8
  66. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +24 -68
  67. azure/ai/evaluation/simulator/_model_tools/models.py +10 -10
  68. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +10 -6
  69. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +5 -6
  70. azure/ai/evaluation/simulator/_simulator.py +207 -277
  71. azure/ai/evaluation/simulator/_tracing.py +4 -4
  72. azure/ai/evaluation/simulator/_utils.py +13 -31
  73. azure_ai_evaluation-1.0.0b2.dist-info/METADATA +449 -0
  74. azure_ai_evaluation-1.0.0b2.dist-info/RECORD +99 -0
  75. {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/WHEEL +1 -1
  76. azure/ai/evaluation/_common/math.py +0 -89
  77. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +0 -46
  78. azure/ai/evaluation/_evaluators/_common/__init__.py +0 -13
  79. azure/ai/evaluation/_evaluators/_common/_base_eval.py +0 -344
  80. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +0 -88
  81. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +0 -133
  82. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +0 -113
  83. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +0 -99
  84. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +0 -20
  85. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +0 -132
  86. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +0 -100
  87. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +0 -124
  88. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +0 -100
  89. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +0 -100
  90. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +0 -100
  91. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +0 -112
  92. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +0 -93
  93. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +0 -148
  94. azure/ai/evaluation/_vendor/__init__.py +0 -3
  95. azure/ai/evaluation/_vendor/rouge_score/__init__.py +0 -14
  96. azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -328
  97. azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -63
  98. azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -63
  99. azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +0 -53
  100. azure/ai/evaluation/simulator/_data_sources/__init__.py +0 -3
  101. azure/ai/evaluation/simulator/_data_sources/grounding.json +0 -1150
  102. azure_ai_evaluation-1.0.0.dist-info/METADATA +0 -595
  103. azure_ai_evaluation-1.0.0.dist-info/NOTICE.txt +0 -70
  104. azure_ai_evaluation-1.0.0.dist-info/RECORD +0 -119
  105. {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/top_level.txt +0 -0
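The per-file listing above comes from the registry's diff tooling. A rough local equivalent can be produced from the two wheels themselves; the sketch below is illustrative only (the wheel filenames are assumed local paths, not part of this diff) and will not reproduce the registry's rename detection.

# Illustrative sketch: roughly reproduce a wheel-to-wheel change summary like the listing above.
# Wheels are zip archives, so the stdlib is enough; filenames below are placeholders.
import difflib
import zipfile

OLD_WHEEL = "azure_ai_evaluation-1.0.0-py3-none-any.whl"   # assumed local path
NEW_WHEEL = "azure_ai_evaluation-1.0.0b2-py3-none-any.whl"  # assumed local path

def read_wheel(path):
    """Return {archive member name: decoded text} for every file in a wheel."""
    with zipfile.ZipFile(path) as zf:
        return {n: zf.read(n).decode("utf-8", errors="replace") for n in zf.namelist()}

old_files, new_files = read_wheel(OLD_WHEEL), read_wheel(NEW_WHEEL)
for name in sorted(old_files.keys() | new_files.keys()):
    old_text = old_files.get(name, "").splitlines(keepends=True)
    new_text = new_files.get(name, "").splitlines(keepends=True)
    diff = list(difflib.unified_diff(old_text, new_text,
                                     fromfile=f"1.0.0/{name}", tofile=f"1.0.0b2/{name}"))
    if diff:
        added = sum(1 for line in diff if line.startswith("+") and not line.startswith("+++"))
        removed = sum(1 for line in diff if line.startswith("-") and not line.startswith("---"))
        print(f"{name} +{added} -{removed}")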
azure/ai/evaluation/_evaluators/_fluency/_fluency.py
@@ -3,102 +3,115 @@
 # ---------------------------------------------------------

 import os
-from typing import Dict, List, Union
+import re

-from typing_extensions import overload, override
+import numpy as np
+from promptflow._utils.async_utils import async_run_allowing_running_loop
+from promptflow.core import AsyncPrompty

-from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
-from azure.ai.evaluation._model_configurations import Conversation
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException

+from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config

-class FluencyEvaluator(PromptyEvaluatorBase[Union[str, float]]):
-    """
-    Evaluates the fluency of a given response or a multi-turn conversation, including reasoning.
+try:
+    from ..._user_agent import USER_AGENT
+except ImportError:
+    USER_AGENT = None
+
+
+class _AsyncFluencyEvaluator:
+    # Constants must be defined within eval's directory to be save/loadable
+    PROMPTY_FILE = "fluency.prompty"
+    LLM_CALL_TIMEOUT = 600
+    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+
+    def __init__(self, model_config: dict):
+        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
+
+        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
+
+        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
+        # https://github.com/encode/httpx/discussions/2959
+        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
+
+        ensure_user_agent_in_aoai_model_config(
+            model_config,
+            prompty_model_config,
+            USER_AGENT,
+        )
+
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
+
+    async def __call__(self, *, query: str, response: str, **kwargs):
+        # Validate input parameters
+        query = str(query or "")
+        response = str(response or "")
+
+        if not (query.strip() and response.strip()):
+            msg = "Both 'query' and 'response' must be non-empty strings."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                error_category=ErrorCategory.MISSING_FIELD,
+                error_blame=ErrorBlame.USER_ERROR,
+                error_target=ErrorTarget.F1_EVALUATOR,
+            )
+
+        # Run the evaluation flow
+        llm_output = await self._flow(query=query, response=response, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
+
+        score = np.nan
+        if llm_output:
+            match = re.search(r"\d", llm_output)
+            if match:
+                score = float(match.group())

-    The fluency measure assesses the extent to which the generated text conforms to grammatical rules, syntactic
-    structures, and appropriate vocabulary usage, resulting in linguistically correct responses.
+        return {"gpt_fluency": float(score)}

-    Fluency scores range from 1 to 5, with 1 being the least fluent and 5 being the most fluent.
+
+class FluencyEvaluator:
+    """
+    Initialize a fluency evaluator configured for a specific Azure OpenAI model.

     :param model_config: Configuration for the Azure OpenAI model.
     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
         ~azure.ai.evaluation.OpenAIModelConfiguration]

-    .. admonition:: Example:
+    **Usage**

-        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
-            :start-after: [START fluency_evaluator]
-            :end-before: [END fluency_evaluator]
-            :language: python
-            :dedent: 8
-            :caption: Initialize and call a FluencyEvaluator.
+    .. code-block:: python

-    .. note::
+        eval_fn = FluencyEvaluator(model_config)
+        result = eval_fn(
+            query="What is the capital of Japan?",
+            response="The capital of Japan is Tokyo.")

-        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
-        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
-        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
-    """
+    **Output format**

-    _PROMPTY_FILE = "fluency.prompty"
-    _RESULT_KEY = "fluency"
+    .. code-block:: python

-    id = "azureml://registries/azureml/models/Fluency-Evaluator/versions/4"
-    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+        {
+            "gpt_fluency": 4.0
+        }
+    """

-    @override
-    def __init__(self, model_config):
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
-        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+    def __init__(self, model_config: dict):
+        self._async_evaluator = _AsyncFluencyEvaluator(model_config)

-    @overload
-    def __call__(
-        self,
-        *,
-        response: str,
-    ) -> Dict[str, Union[str, float]]:
-        """Evaluate fluency in given response
+    def __call__(self, *, query: str, response: str, **kwargs):
+        """
+        Evaluate fluency.

+        :keyword query: The query to be evaluated.
+        :paramtype query: str
         :keyword response: The response to be evaluated.
         :paramtype response: str
-        :return: The fluency score
-        :rtype: Dict[str, float]
-        """
-
-    @overload
-    def __call__(
-        self,
-        *,
-        conversation: Conversation,
-    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
-        """Evaluate fluency for a conversation
-
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages", and potentially a global context under the key "context". Conversation turns are expected
-            to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
-        :return: The fluency score
-        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
-        """
-
-    @override
-    def __call__(  # pylint: disable=docstring-missing-param
-        self,
-        *args,
-        **kwargs,
-    ):
-        """
-        Evaluate fluency. Accepts either a response for a single evaluation,
-        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
-        the evaluator will aggregate the results of each turn.
-
-        :keyword response: The response to be evaluated. Mutually exclusive with the "conversation" parameter.
-        :paramtype response: Optional[str]
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages". Conversation turns are expected to be dictionaries with keys "content" and "role".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
         :return: The fluency score.
-        :rtype: Union[Dict[str, float], Dict[str, Union[float, Dict[str, List[float]]]]]
+        :rtype: dict
         """
-        return super().__call__(*args, **kwargs)
+        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+    def _to_async(self):
+        return self._async_evaluator
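Read together, the two docstrings in this hunk describe different call patterns. Below is a hedged usage sketch for the 1.0.0b2 ("+") side; the endpoint, key, and deployment values are placeholders, and the model_config keys are assumed to follow the AzureOpenAIModelConfiguration shape referenced in the docstring rather than being confirmed by this diff.

# Hedged sketch of the 1.0.0b2 ("+") call pattern documented above.
# Placeholder endpoint/key/deployment values; not real settings.
from azure.ai.evaluation import FluencyEvaluator

model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",  # placeholder
    "api_key": "<api-key>",                                        # placeholder
    "azure_deployment": "<deployment-name>",                       # placeholder
    # "api_version" can be omitted; __init__ falls back to DEFAULT_OPEN_API_VERSION ("2024-02-15-preview").
}

fluency = FluencyEvaluator(model_config)
result = fluency(query="What is the capital of Japan?", response="The capital of Japan is Tokyo.")
print(result)  # e.g. {"gpt_fluency": 4.0}

# On the 1.0.0 ("-") side the evaluator instead accepts a bare response or a conversation,
# and reports the score under "fluency" in addition to the legacy "gpt_fluency" key.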
azure/ai/evaluation/_evaluators/_fluency/fluency.prompty
@@ -3,9 +3,14 @@ name: Fluency
 description: Evaluates fluency score for QA scenario
 model:
   api: chat
+  configuration:
+    type: azure_openai
+    azure_deployment: ${env:AZURE_DEPLOYMENT}
+    api_key: ${env:AZURE_OPENAI_API_KEY}
+    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
   parameters:
     temperature: 0.0
-    max_tokens: 800
+    max_tokens: 1
     top_p: 1.0
     presence_penalty: 0
     frequency_penalty: 0
@@ -13,74 +18,44 @@ model:
       type: text

 inputs:
+  query:
+    type: string
   response:
     type: string

 ---
 system:
-# Instruction
-## Goal
-### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
-- **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
-- **Data**: Your input data include a RESPONSE.
-- **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
-
+You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. You should return a single integer value between 1 to 5 representing the evaluation metric. You will include no other text or information.
 user:
-# Definition
-**Fluency** refers to the effectiveness and clarity of written communication, focusing on grammatical accuracy, vocabulary range, sentence complexity, coherence, and overall readability. It assesses how smoothly ideas are conveyed and how easily the text can be understood by the reader.
-
-# Ratings
-## [Fluency: 1] (Emergent Fluency)
-**Definition:** The response shows minimal command of the language. It contains pervasive grammatical errors, extremely limited vocabulary, and fragmented or incoherent sentences. The message is largely incomprehensible, making understanding very difficult.
-
-**Examples:**
-**Response:** Free time I. Go park. Not fun. Alone.
-
-**Response:** Like food pizza. Good cheese eat.
-
-## [Fluency: 2] (Basic Fluency)
-**Definition:** The response communicates simple ideas but has frequent grammatical errors and limited vocabulary. Sentences are short and may be improperly constructed, leading to partial understanding. Repetition and awkward phrasing are common.
-
-**Examples:**
-**Response:** I like play soccer. I watch movie. It fun.
-
-**Response:** My town small. Many people. We have market.
-
-## [Fluency: 3] (Competent Fluency)
-**Definition:** The response clearly conveys ideas with occasional grammatical errors. Vocabulary is adequate but not extensive. Sentences are generally correct but may lack complexity and variety. The text is coherent, and the message is easily understood with minimal effort.
-
-**Examples:**
-**Response:** I'm planning to visit friends and maybe see a movie together.
-
-**Response:** I try to eat healthy food and exercise regularly by jogging.
-
-## [Fluency: 4] (Proficient Fluency)
-**Definition:** The response is well-articulated with good control of grammar and a varied vocabulary. Sentences are complex and well-structured, demonstrating coherence and cohesion. Minor errors may occur but do not affect overall understanding. The text flows smoothly, and ideas are connected logically.
-
-**Examples:**
-**Response:** My interest in mathematics and problem-solving inspired me to become an engineer, as I enjoy designing solutions that improve people's lives.
-
-**Response:** Environmental conservation is crucial because it protects ecosystems, preserves biodiversity, and ensures natural resources are available for future generations.
-
-## [Fluency: 5] (Exceptional Fluency)
-**Definition:** The response demonstrates an exceptional command of language with sophisticated vocabulary and complex, varied sentence structures. It is coherent, cohesive, and engaging, with precise and nuanced expression. Grammar is flawless, and the text reflects a high level of eloquence and style.
-
-**Examples:**
-**Response:** Globalization exerts a profound influence on cultural diversity by facilitating unprecedented cultural exchange while simultaneously risking the homogenization of distinct cultural identities, which can diminish the richness of global heritage.
-
-**Response:** Technology revolutionizes modern education by providing interactive learning platforms, enabling personalized learning experiences, and connecting students worldwide, thereby transforming how knowledge is acquired and shared.
-
-
-# Data
-RESPONSE: {{response}}
-
-
-# Tasks
-## Please provide your assessment Score for the previous RESPONSE based on the Definitions above. Your output should include the following information:
-- **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
-- **Explanation**: a very short explanation of why you think the input Data should get that Score.
-- **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
-
-
-## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
-# Output
+Fluency measures the quality of individual sentences in the answer, and whether they are well-written and grammatically correct. Consider the quality of individual sentences when evaluating fluency. Given the question and answer, score the fluency of the answer between one to five stars using the following rating scale:
+One star: the answer completely lacks fluency
+Two stars: the answer mostly lacks fluency
+Three stars: the answer is partially fluent
+Four stars: the answer is mostly fluent
+Five stars: the answer has perfect fluency
+
+This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
+
+question: What did you have for breakfast today?
+answer: Breakfast today, me eating cereal and orange juice very good.
+stars: 1
+
+question: How do you feel when you travel alone?
+answer: Alone travel, nervous, but excited also. I feel adventure and like its time.
+stars: 2
+
+question: When was the last time you went on a family vacation?
+answer: Last family vacation, it took place in last summer. We traveled to a beach destination, very fun.
+stars: 3
+
+question: What is your favorite thing about your job?
+answer: My favorite aspect of my job is the chance to interact with diverse people. I am constantly learning from their experiences and stories.
+stars: 4
+
+question: Can you describe your morning routine?
+answer: Every morning, I wake up at 6 am, drink a glass of water, and do some light stretching. After that, I take a shower and get dressed for work. Then, I have a healthy breakfast, usually consisting of oatmeal and fruits, before leaving the house around 7:30 am.
+stars: 5
+
+question: {{query}}
+answer: {{response}}
+stars:
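The 1.0.0b2 template above pairs a "return a single integer" instruction with max_tokens: 1, so the completion can only ever be the rating itself; the digit is then pulled out of the raw output exactly as in the _AsyncFluencyEvaluator hunk earlier. A standalone sketch of that parsing step (not the package's public API) follows.

# Standalone sketch of the score-parsing step used on the "+" side above:
# the model's reply is expected to be a lone digit, so the first digit found becomes the score.
import re
import numpy as np

def parse_score(llm_output: str) -> float:
    score = np.nan
    if llm_output:
        match = re.search(r"\d", llm_output)
        if match:
            score = float(match.group())
    return score

print(parse_score("4"))         # 4.0
print(parse_score("stars: 3"))  # 3.0 -- the first digit wins
print(parse_score(""))          # nan -- nothing parseable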
azure/ai/evaluation/_evaluators/_gleu/_gleu.py
@@ -24,28 +24,30 @@ class _AsyncGleuScoreEvaluator:

 class GleuScoreEvaluator:
     """
-    Calculates the GLEU (Google-BLEU) score between a response and the ground truth.
+    Evaluator that computes the BLEU Score between two strings.

     The GLEU (Google-BLEU) score evaluator measures the similarity between generated and reference texts by
     evaluating n-gram overlap, considering both precision and recall. This balanced evaluation, designed for
     sentence-level assessment, makes it ideal for detailed analysis of translation quality. GLEU is well-suited for
     use cases such as machine translation, text summarization, and text generation.

-    GLEU scores range from 0 to 1, where a value of 1 represents perfect overlap between the response and
-    the ground truth and a value of 0 indicates no overlap.
+    **Usage**

-    .. admonition:: Example:
+    .. code-block:: python

-        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
-            :start-after: [START gleu_score_evaluator]
-            :end-before: [END gleu_score_evaluator]
-            :language: python
-            :dedent: 8
-            :caption: Initialize and call a GleuScoreEvaluator.
-    """
+        eval_fn = GleuScoreEvaluator()
+        result = eval_fn(
+            response="Tokyo is the capital of Japan.",
+            ground_truth="The capital of Japan is Tokyo.")
+
+    **Output format**
+
+    .. code-block:: python

-    id = "azureml://registries/azureml/models/Gleu-Score-Evaluator/versions/3"
-    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+        {
+            "gleu_score": 0.41
+        }
+    """

     def __init__(self):
         self._async_evaluator = _AsyncGleuScoreEvaluator()
@@ -59,7 +61,7 @@ class GleuScoreEvaluator:
         :keyword ground_truth: The ground truth to be compared against.
         :paramtype ground_truth: str
         :return: The GLEU score.
-        :rtype: Dict[str, float]
+        :rtype: dict
         """
         return async_run_allowing_running_loop(
             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
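These two hunks only rewrite the GleuScoreEvaluator docstrings; the scoring code inside _AsyncGleuScoreEvaluator is not part of this diff. Assuming an NLTK-style sentence-level GLEU (an assumption about the implementation, not something shown here), the docstring example works out roughly as follows; the tokenizer is a stand-in, not the evaluator's own.

# Hedged sketch: approximate the docstring example with NLTK's sentence-level GLEU.
import re
from nltk.translate.gleu_score import sentence_gleu

def tokenize(text: str):
    # Simple word/punctuation split; stands in for whatever tokenizer the evaluator really uses.
    return re.findall(r"\w+|[^\w\s]", text)

ground_truth = "The capital of Japan is Tokyo."
response = "Tokyo is the capital of Japan."

score = sentence_gleu([tokenize(ground_truth)], tokenize(response))
print({"gleu_score": round(score, 2)})  # ~0.41 for this pair, in line with the docstring example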
azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py
@@ -1,144 +1,118 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
+
 import os
-from typing import Dict, List, Optional, Union
+import re

-from typing_extensions import overload, override
+import numpy as np
+from promptflow._utils.async_utils import async_run_allowing_running_loop
 from promptflow.core import AsyncPrompty

-from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
-from azure.ai.evaluation._model_configurations import Conversation
-from ..._common.utils import construct_prompty_model_config, validate_model_config
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+
+from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config

 try:
     from ..._user_agent import USER_AGENT
 except ImportError:
-    USER_AGENT = "None"
+    USER_AGENT = None


-class GroundednessEvaluator(PromptyEvaluatorBase[Union[str, float]]):
-    """
-    Evaluates groundedness score for a given query (optional), response, and context or a multi-turn conversation,
-    including reasoning.
+class _AsyncGroundednessEvaluator:
+    # Constants must be defined within eval's directory to be save/loadable
+    PROMPTY_FILE = "groundedness.prompty"
+    LLM_CALL_TIMEOUT = 600
+    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"

-    The groundedness measure assesses the correspondence between claims in an AI-generated answer and the source
-    context, making sure that these claims are substantiated by the context. Even if the responses from LLM are
-    factually correct, they'll be considered ungrounded if they can't be verified against the provided sources
-    (such as your input source or your database). Use the groundedness metric when you need to verify that
-    AI-generated responses align with and are validated by the provided context.
+    def __init__(self, model_config: dict):
+        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)

-    Groundedness scores range from 1 to 5, with 1 being the least grounded and 5 being the most grounded.
+        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}

-    :param model_config: Configuration for the Azure OpenAI model.
-    :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
-        ~azure.ai.evaluation.OpenAIModelConfiguration]
+        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
+        # https://github.com/encode/httpx/discussions/2959
+        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})

-    .. admonition:: Example:
+        ensure_user_agent_in_aoai_model_config(
+            model_config,
+            prompty_model_config,
+            USER_AGENT,
+        )
+
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, "groundedness.prompty")
+        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
+
+    async def __call__(self, *, response: str, context: str, **kwargs):
+        # Validate input parameters
+        response = str(response or "")
+        context = str(context or "")
+
+        if not response.strip() or not context.strip():
+            msg = "Both 'response' and 'context' must be non-empty strings."
+            raise EvaluationException(
+                message=msg,
+                internal_message=msg,
+                error_category=ErrorCategory.MISSING_FIELD,
+                error_blame=ErrorBlame.USER_ERROR,
+                error_target=ErrorTarget.F1_EVALUATOR,
+            )

-        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
-            :start-after: [START groundedness_evaluator]
-            :end-before: [END groundedness_evaluator]
-            :language: python
-            :dedent: 8
-            :caption: Initialize and call a GroundednessEvaluator.
+        # Run the evaluation flow
+        llm_output = await self._flow(response=response, context=context, timeout=self.LLM_CALL_TIMEOUT, **kwargs)

-    .. note::
+        score = np.nan
+        if llm_output:
+            match = re.search(r"\d", llm_output)
+            if match:
+                score = float(match.group())

-        To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
-        To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
-        however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
+        return {"gpt_groundedness": float(score)}
+
+
+class GroundednessEvaluator:
     """
+    Initialize a groundedness evaluator configured for a specific Azure OpenAI model.
+
+    :param model_config: Configuration for the Azure OpenAI model.
+    :type model_config: Union[~azure.ai.evalation.AzureOpenAIModelConfiguration,
+        ~azure.ai.evalation.OpenAIModelConfiguration]

-    _PROMPTY_FILE_NO_QUERY = "groundedness_without_query.prompty"
-    _PROMPTY_FILE_WITH_QUERY = "groundedness_with_query.prompty"
-    _RESULT_KEY = "groundedness"
-    _OPTIONAL_PARAMS = ["query"]
+    **Usage**

-    id = "azureml://registries/azureml/models/Groundedness-Evaluator/versions/4"
-    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+    .. code-block:: python

-    @override
-    def __init__(self, model_config):
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_NO_QUERY)  # Default to no query
-
-        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
-        self._model_config = model_config
-        # Needs to be set because it's used in call method to re-validate prompt if `query` is provided
-
-    @overload
-    def __call__(
-        self,
-        *,
-        response: str,
-        context: str,
-        query: Optional[str] = None,
-    ) -> Dict[str, Union[str, float]]:
-        """Evaluate groundedness for given input of response, context
+        eval_fn = GroundednessEvaluator(model_config)
+        result = eval_fn(
+            response="The capital of Japan is Tokyo.",
+            context="Tokyo is Japan's capital, known for its blend of traditional culture \
+                and technological advancements.")
+
+    **Output format**
+
+    .. code-block:: python
+
+        {
+            "gpt_groundedness": 5
+        }
+    """
+
+    def __init__(self, model_config: dict):
+        self._async_evaluator = _AsyncGroundednessEvaluator(model_config)
+
+    def __call__(self, *, response: str, context: str, **kwargs):
+        """
+        Evaluate groundedness of the response in the context.

         :keyword response: The response to be evaluated.
         :paramtype response: str
-        :keyword context: The context to be evaluated.
+        :keyword context: The context in which the response is evaluated.
         :paramtype context: str
-        :keyword query: The query to be evaluated. Optional parameter for use with the `response`
-            and `context` parameters. If provided, a different prompt template will be used for evaluation.
-        :paramtype query: Optional[str]
         :return: The groundedness score.
-        :rtype: Dict[str, float]
+        :rtype: dict
         """
+        return async_run_allowing_running_loop(self._async_evaluator, response=response, context=context, **kwargs)

-    @overload
-    def __call__(
-        self,
-        *,
-        conversation: Conversation,
-    ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
-        """Evaluate groundedness for a conversation
-
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages", and potentially a global context under the key "context". Conversation turns are expected
-            to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
-        :return: The groundedness score.
-        :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
-        """
-
-    @override
-    def __call__(  # pylint: disable=docstring-missing-param
-        self,
-        *args,
-        **kwargs,
-    ):
-        """Evaluate groundedness. Accepts either a query, response, and context for a single evaluation,
-        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
-        the evaluator will aggregate the results of each turn.
-
-        :keyword query: The query to be evaluated. Mutually exclusive with `conversation`. Optional parameter for use
-            with the `response` and `context` parameters. If provided, a different prompt template will be used for
-            evaluation.
-        :paramtype query: Optional[str]
-        :keyword response: The response to be evaluated. Mutually exclusive with the `conversation` parameter.
-        :paramtype response: Optional[str]
-        :keyword context: The context to be evaluated. Mutually exclusive with the `conversation` parameter.
-        :paramtype context: Optional[str]
-        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
-            key "messages", and potentially a global context under the key "context". Conversation turns are expected
-            to be dictionaries with keys "content", "role", and possibly "context".
-        :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
-        :return: The relevance score.
-        :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
-        """
-
-        if kwargs.get("query", None):
-            current_dir = os.path.dirname(__file__)
-            prompty_path = os.path.join(current_dir, self._PROMPTY_FILE_WITH_QUERY)
-            self._prompty_file = prompty_path
-            prompty_model_config = construct_prompty_model_config(
-                validate_model_config(self._model_config),
-                self._DEFAULT_OPEN_API_VERSION,
-                USER_AGENT,
-            )
-            self._flow = AsyncPrompty.load(source=self._prompty_file, model=prompty_model_config)
-
-        return super().__call__(*args, **kwargs)
+    def _to_async(self):
+        return self._async_evaluator
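As with fluency, the two sides of this hunk imply different call patterns, and which one applies depends on the installed version. A hedged sketch with placeholder endpoint, key, and deployment values follows; it only restates what the docstrings above show.

# Hedged sketch of the two GroundednessEvaluator call patterns in the hunk above.
# Placeholder endpoint/key/deployment values; not real settings.
from azure.ai.evaluation import GroundednessEvaluator

model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",  # placeholder
    "api_key": "<api-key>",                                        # placeholder
    "azure_deployment": "<deployment-name>",                       # placeholder
}

groundedness = GroundednessEvaluator(model_config)
context = "Tokyo is Japan's capital, known for its blend of traditional culture and technological advancements."

# 1.0.0b2 ("+" side): response and context only; the score comes back as "gpt_groundedness".
result = groundedness(response="The capital of Japan is Tokyo.", context=context)
print(result)

# 1.0.0 ("-" side): an optional query switches to groundedness_with_query.prompty, conversations
# are also accepted, and the primary result key is "groundedness".
# result = groundedness(query="What is the capital of Japan?",
#                       response="The capital of Japan is Tokyo.", context=context)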