azure-ai-evaluation 0.0.0b0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. azure/ai/evaluation/__init__.py +82 -0
  2. azure/ai/evaluation/_common/__init__.py +16 -0
  3. azure/ai/evaluation/_common/_experimental.py +172 -0
  4. azure/ai/evaluation/_common/constants.py +72 -0
  5. azure/ai/evaluation/_common/math.py +89 -0
  6. azure/ai/evaluation/_common/rai_service.py +632 -0
  7. azure/ai/evaluation/_common/utils.py +445 -0
  8. azure/ai/evaluation/_constants.py +72 -0
  9. azure/ai/evaluation/_evaluate/__init__.py +3 -0
  10. azure/ai/evaluation/_evaluate/_batch_run/__init__.py +9 -0
  11. azure/ai/evaluation/_evaluate/_batch_run/code_client.py +188 -0
  12. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +89 -0
  13. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +99 -0
  14. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
  15. azure/ai/evaluation/_evaluate/_eval_run.py +571 -0
  16. azure/ai/evaluation/_evaluate/_evaluate.py +850 -0
  17. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +179 -0
  18. azure/ai/evaluation/_evaluate/_utils.py +298 -0
  19. azure/ai/evaluation/_evaluators/__init__.py +3 -0
  20. azure/ai/evaluation/_evaluators/_bleu/__init__.py +9 -0
  21. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +72 -0
  22. azure/ai/evaluation/_evaluators/_coherence/__init__.py +7 -0
  23. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +107 -0
  24. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +99 -0
  25. azure/ai/evaluation/_evaluators/_common/__init__.py +13 -0
  26. azure/ai/evaluation/_evaluators/_common/_base_eval.py +344 -0
  27. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +88 -0
  28. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +133 -0
  29. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +17 -0
  30. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -0
  31. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +129 -0
  32. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -0
  33. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +125 -0
  34. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +126 -0
  35. azure/ai/evaluation/_evaluators/_eci/__init__.py +0 -0
  36. azure/ai/evaluation/_evaluators/_eci/_eci.py +89 -0
  37. azure/ai/evaluation/_evaluators/_f1_score/__init__.py +9 -0
  38. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +157 -0
  39. azure/ai/evaluation/_evaluators/_fluency/__init__.py +9 -0
  40. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +104 -0
  41. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +86 -0
  42. azure/ai/evaluation/_evaluators/_gleu/__init__.py +9 -0
  43. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +69 -0
  44. azure/ai/evaluation/_evaluators/_groundedness/__init__.py +9 -0
  45. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +144 -0
  46. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
  47. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
  48. azure/ai/evaluation/_evaluators/_meteor/__init__.py +9 -0
  49. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +90 -0
  50. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
  51. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
  52. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
  53. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
  54. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
  55. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
  56. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
  57. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
  58. azure/ai/evaluation/_evaluators/_protected_material/__init__.py +5 -0
  59. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +113 -0
  60. azure/ai/evaluation/_evaluators/_qa/__init__.py +9 -0
  61. azure/ai/evaluation/_evaluators/_qa/_qa.py +93 -0
  62. azure/ai/evaluation/_evaluators/_relevance/__init__.py +9 -0
  63. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +114 -0
  64. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +100 -0
  65. azure/ai/evaluation/_evaluators/_retrieval/__init__.py +9 -0
  66. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +112 -0
  67. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
  68. azure/ai/evaluation/_evaluators/_rouge/__init__.py +10 -0
  69. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +98 -0
  70. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  71. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
  72. azure/ai/evaluation/_evaluators/_similarity/__init__.py +9 -0
  73. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +140 -0
  74. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +66 -0
  75. azure/ai/evaluation/_evaluators/_xpia/__init__.py +5 -0
  76. azure/ai/evaluation/_evaluators/_xpia/xpia.py +125 -0
  77. azure/ai/evaluation/_exceptions.py +128 -0
  78. azure/ai/evaluation/_http_utils.py +466 -0
  79. azure/ai/evaluation/_model_configurations.py +123 -0
  80. azure/ai/evaluation/_user_agent.py +6 -0
  81. azure/ai/evaluation/_vendor/__init__.py +3 -0
  82. azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
  83. azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +328 -0
  84. azure/ai/evaluation/_vendor/rouge_score/scoring.py +63 -0
  85. azure/ai/evaluation/_vendor/rouge_score/tokenize.py +63 -0
  86. azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
  87. azure/ai/evaluation/_version.py +5 -0
  88. azure/ai/evaluation/py.typed +0 -0
  89. azure/ai/evaluation/simulator/__init__.py +16 -0
  90. azure/ai/evaluation/simulator/_adversarial_scenario.py +46 -0
  91. azure/ai/evaluation/simulator/_adversarial_simulator.py +471 -0
  92. azure/ai/evaluation/simulator/_constants.py +27 -0
  93. azure/ai/evaluation/simulator/_conversation/__init__.py +316 -0
  94. azure/ai/evaluation/simulator/_conversation/_conversation.py +178 -0
  95. azure/ai/evaluation/simulator/_conversation/constants.py +30 -0
  96. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  97. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  98. azure/ai/evaluation/simulator/_direct_attack_simulator.py +218 -0
  99. azure/ai/evaluation/simulator/_helpers/__init__.py +4 -0
  100. azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +17 -0
  101. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +96 -0
  102. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +220 -0
  103. azure/ai/evaluation/simulator/_model_tools/__init__.py +23 -0
  104. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +195 -0
  105. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +244 -0
  106. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +168 -0
  107. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +201 -0
  108. azure/ai/evaluation/simulator/_model_tools/models.py +614 -0
  109. azure/ai/evaluation/simulator/_prompty/__init__.py +0 -0
  110. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +65 -0
  111. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +37 -0
  112. azure/ai/evaluation/simulator/_simulator.py +716 -0
  113. azure/ai/evaluation/simulator/_tracing.py +89 -0
  114. azure/ai/evaluation/simulator/_utils.py +132 -0
  115. azure_ai_evaluation-1.0.0.dist-info/METADATA +595 -0
  116. azure_ai_evaluation-1.0.0.dist-info/NOTICE.txt +70 -0
  117. azure_ai_evaluation-1.0.0.dist-info/RECORD +119 -0
  118. {azure_ai_evaluation-0.0.0b0.dist-info → azure_ai_evaluation-1.0.0.dist-info}/WHEEL +1 -1
  119. azure_ai_evaluation-1.0.0.dist-info/top_level.txt +1 -0
  120. azure_ai_evaluation-0.0.0b0.dist-info/METADATA +0 -7
  121. azure_ai_evaluation-0.0.0b0.dist-info/RECORD +0 -4
  122. azure_ai_evaluation-0.0.0b0.dist-info/top_level.txt +0 -1
@@ -0,0 +1,93 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from concurrent.futures import as_completed
+ from typing import Callable, Dict, List, Union
+
+ from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
+
+ from .._coherence import CoherenceEvaluator
+ from .._f1_score import F1ScoreEvaluator
+ from .._fluency import FluencyEvaluator
+ from .._groundedness import GroundednessEvaluator
+ from .._relevance import RelevanceEvaluator
+ from .._similarity import SimilarityEvaluator
+
+
+ class QAEvaluator:
+     """
+     Initialize a question-answer evaluator configured for a specific Azure OpenAI model.
+
+     :param model_config: Configuration for the Azure OpenAI model.
+     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+         ~azure.ai.evaluation.OpenAIModelConfiguration]
+     :param kwargs: Additional arguments to pass to the evaluator.
+     :type kwargs: Any
+     :return: A callable class that evaluates and generates metrics for the "question-answering" scenario.
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START qa_evaluator]
+             :end-before: [END qa_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a QAEvaluator.
+
+     .. note::
+
+         To align with our support of a diverse set of models, keys without the `gpt_` prefix have been added.
+         To maintain backwards compatibility, the old keys with the `gpt_` prefix are still present in the output;
+         however, it is recommended to use the new keys moving forward, as the old keys will be deprecated in the future.
+     """
+
+     id = "qa"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     def __init__(self, model_config, **kwargs):
+         self._parallel = kwargs.pop("_parallel", False)
+
+         self._evaluators: List[Union[Callable[..., Dict[str, Union[str, float]]], Callable[..., Dict[str, float]]]] = [
+             GroundednessEvaluator(model_config),
+             RelevanceEvaluator(model_config),
+             CoherenceEvaluator(model_config),
+             FluencyEvaluator(model_config),
+             SimilarityEvaluator(model_config),
+             F1ScoreEvaluator(),
+         ]
+
+     def __call__(self, *, query: str, response: str, context: str, ground_truth: str, **kwargs):
+         """
+         Evaluates the question-answering scenario.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword context: The context to be evaluated.
+         :paramtype context: str
+         :keyword ground_truth: The ground truth to be evaluated.
+         :paramtype ground_truth: str
+         :return: The scores for the QA scenario.
+         :rtype: Dict[str, Union[str, float]]
+         """
+         results: Dict[str, Union[str, float]] = {}
+         if self._parallel:
+             with ThreadPoolExecutor() as executor:
+                 futures = {
+                     executor.submit(
+                         evaluator, query=query, response=response, context=context, ground_truth=ground_truth, **kwargs
+                     ): evaluator
+                     for evaluator in self._evaluators
+                 }
+
+                 # Collect results as they complete
+                 for future in as_completed(futures):
+                     results.update(future.result())
+         else:
+             for evaluator in self._evaluators:
+                 result = evaluator(query=query, response=response, context=context, ground_truth=ground_truth, **kwargs)
+                 results.update(result)
+
+         return results
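For orientation, the sketch below shows how the `QAEvaluator` added above could be used. The call signature comes from the diff itself; the shape of the model configuration dict (`azure_endpoint`, `api_key`, `azure_deployment`) and the top-level import path are assumptions based on `azure/ai/evaluation/_model_configurations.py` and `azure/ai/evaluation/__init__.py`, which are not shown in this hunk.

```python
from azure.ai.evaluation import QAEvaluator  # assumed top-level re-export

# Hypothetical Azure OpenAI configuration; key names are assumptions, not from this hunk.
model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",
    "api_key": "<your-api-key>",
    "azure_deployment": "<your-deployment>",
}

qa_eval = QAEvaluator(model_config)

# One call fans out to groundedness, relevance, coherence, fluency, similarity, and F1.
scores = qa_eval(
    query="Where is Paris?",
    response="Paris is the capital of France.",
    context="France's capital city is Paris.",
    ground_truth="Paris is located in France.",
)
print(scores)  # merged dict of per-metric scores, e.g. {"f1_score": ..., "relevance": ..., ...}
```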
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._relevance import RelevanceEvaluator
+
+ __all__ = [
+     "RelevanceEvaluator",
+ ]
@@ -0,0 +1,114 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ import os
+ from typing import Dict, List, Union
+
+ from typing_extensions import overload, override
+
+ from azure.ai.evaluation._model_configurations import Conversation
+ from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
+
+
+ class RelevanceEvaluator(PromptyEvaluatorBase):
+     """
+     Evaluates the relevance score for a given query and response, or a multi-turn conversation, including reasoning.
+
+     The relevance measure assesses the ability of answers to capture the key points of the context.
+     High relevance scores signify the AI system's understanding of the input and its capability to produce coherent
+     and contextually appropriate outputs. Conversely, low relevance scores indicate that generated responses might
+     be off-topic, lacking in context, or insufficient in addressing the user's intended queries. Use the relevance
+     metric when evaluating the AI system's performance in understanding the input and generating contextually
+     appropriate responses.
+
+     Relevance scores range from 1 to 5, with 1 being the worst and 5 being the best.
+
+     :param model_config: Configuration for the Azure OpenAI model.
+     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+         ~azure.ai.evaluation.OpenAIModelConfiguration]
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START relevance_evaluator]
+             :end-before: [END relevance_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a RelevanceEvaluator with a query, response, and context.
+
+     .. note::
+
+         To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+         To maintain backwards compatibility, the old key with the `gpt_` prefix is still present in the output;
+         however, it is recommended to use the new key moving forward, as the old key will be deprecated in the future.
+     """
+
+     # Constants must be defined within the eval's directory to be save/loadable
+     _PROMPTY_FILE = "relevance.prompty"
+     _RESULT_KEY = "relevance"
+
+     id = "azureml://registries/azureml/models/Relevance-Evaluator/versions/4"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     @override
+     def __init__(self, model_config):
+         current_dir = os.path.dirname(__file__)
+         prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+         super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+
+     @overload
+     def __call__(
+         self,
+         *,
+         query: str,
+         response: str,
+     ) -> Dict[str, Union[str, float]]:
+         """Evaluate relevance for a given query and response.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The relevance score.
+         :rtype: Dict[str, Union[str, float]]
+         """
+
+     @overload
+     def __call__(
+         self,
+         *,
+         conversation: Conversation,
+     ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+         """Evaluate relevance for a conversation.
+
+         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+             key "messages", and potentially a global context under the key "context". Conversation turns are expected
+             to be dictionaries with keys "content", "role", and possibly "context".
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The relevance score.
+         :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+         """
+
+     @override
+     def __call__(  # pylint: disable=docstring-missing-param
+         self,
+         *args,
+         **kwargs,
+     ):
+         """Evaluate relevance. Accepts either a query and response for a single evaluation,
+         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+         the evaluator will aggregate the results of each turn.
+
+         :keyword query: The query to be evaluated. Mutually exclusive with the `conversation` parameter.
+         :paramtype query: Optional[str]
+         :keyword response: The response to be evaluated. Mutually exclusive with the `conversation` parameter.
+         :paramtype response: Optional[str]
+         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+             key "messages", and potentially a global context under the key "context". Conversation turns are expected
+             to be dictionaries with keys "content", "role", and possibly "context".
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The relevance score.
+         :rtype: Union[Dict[str, Union[str, float]], Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]]
+         """
+         return super().__call__(*args, **kwargs)
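A minimal usage sketch for the `RelevanceEvaluator` above, covering both the single-turn and the conversation overloads. The configuration dict shape and import path are assumptions, as in the earlier QAEvaluator sketch.

```python
from azure.ai.evaluation import RelevanceEvaluator  # assumed top-level re-export

model_config = {  # hypothetical Azure OpenAI configuration; key names assumed
    "azure_endpoint": "https://<your-resource>.openai.azure.com",
    "api_key": "<your-api-key>",
    "azure_deployment": "<your-deployment>",
}

relevance = RelevanceEvaluator(model_config)

# Single-turn overload: query + response.
print(relevance(query="What is the capital of Japan?", response="The capital of Japan is Tokyo."))

# Conversation overload: per-turn results are aggregated, as described in the docstring above.
conversation = {
    "messages": [
        {"role": "user", "content": "What is the capital of Japan?"},
        {"role": "assistant", "content": "The capital of Japan is Tokyo."},
    ]
}
print(relevance(conversation=conversation))
```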
@@ -0,0 +1,100 @@
+ ---
+ name: Relevance
+ description: Evaluates relevance score for QA scenario
+ model:
+   api: chat
+   parameters:
+     temperature: 0.0
+     max_tokens: 800
+     top_p: 1.0
+     presence_penalty: 0
+     frequency_penalty: 0
+     response_format:
+       type: text
+
+ inputs:
+   query:
+     type: string
+   response:
+     type: string
+
+ ---
+ system:
+ # Instruction
+ ## Goal
+ ### You are an expert in evaluating the quality of a RESPONSE from an intelligent system based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+ - **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
+ - **Data**: Your input data include QUERY and RESPONSE.
+ - **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+ user:
+ # Definition
+ **Relevance** refers to how effectively a response addresses a question. It assesses the accuracy, completeness, and direct relevance of the response based solely on the given information.
+
+ # Ratings
+ ## [Relevance: 1] (Irrelevant Response)
+ **Definition:** The response is unrelated to the question. It provides information that is off-topic and does not attempt to address the question posed.
+
+ **Examples:**
+ **Query:** What is the team preparing for?
+ **Response:** I went grocery shopping yesterday evening.
+
+ **Query:** When will the company's new product line launch?
+ **Response:** International travel can be very rewarding and educational.
+
+ ## [Relevance: 2] (Incorrect Response)
+ **Definition:** The response attempts to address the question but includes incorrect information. It provides a response that is factually wrong based on the provided information.
+
+ **Examples:**
+ **Query:** When was the merger between the two firms finalized?
+ **Response:** The merger was finalized on April 10th.
+
+ **Query:** Where and when will the solar eclipse be visible?
+ **Response:** The solar eclipse will be visible in Asia on December 14th.
+
+ ## [Relevance: 3] (Incomplete Response)
+ **Definition:** The response addresses the question but omits key details necessary for a full understanding. It provides a partial response that lacks essential information.
+
+ **Examples:**
+ **Query:** What type of food does the new restaurant offer?
+ **Response:** The restaurant offers Italian food like pasta.
+
+ **Query:** What topics will the conference cover?
+ **Response:** The conference will cover renewable energy and climate change.
+
+ ## [Relevance: 4] (Complete Response)
+ **Definition:** The response fully addresses the question with accurate and complete information. It includes all essential details required for a comprehensive understanding, without adding any extraneous information.
+
+ **Examples:**
+ **Query:** What type of food does the new restaurant offer?
+ **Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto.
+
+ **Query:** What topics will the conference cover?
+ **Response:** The conference will cover renewable energy, climate change, and sustainability practices.
+
+ ## [Relevance: 5] (Comprehensive Response with Insights)
+ **Definition:** The response not only fully and accurately addresses the question but also includes additional relevant insights or elaboration. It may explain the significance, implications, or provide minor inferences that enhance understanding.
+
+ **Examples:**
+ **Query:** What type of food does the new restaurant offer?
+ **Response:** The new restaurant offers Italian cuisine, featuring dishes like pasta, pizza, and risotto, aiming to provide customers with an authentic Italian dining experience.
+
+ **Query:** What topics will the conference cover?
+ **Response:** The conference will cover renewable energy, climate change, and sustainability practices, bringing together global experts to discuss these critical issues.
+
+
+
+ # Data
+ QUERY: {{query}}
+ RESPONSE: {{response}}
+
+
+ # Tasks
+ ## Please provide your assessment Score for the previous RESPONSE in relation to the QUERY based on the Definitions above. Your output should include the following information:
+ - **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+ - **Explanation**: a very short explanation of why you think the input Data should get that Score.
+ - **Score**: based on your previous analysis, provide your Score. The Score you give MUST be an integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+ ## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+ # Output
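The template above instructs the model to return its chain of thought, explanation, and score between `<S0>`, `<S1>`, and `<S2>` tags. The actual parsing lives in `PromptyEvaluatorBase` (`_base_prompty_eval.py`, not part of this hunk); the snippet below is only an illustrative sketch of how such tagged output could be extracted, with `parse_tagged_output` being a hypothetical helper.

```python
import re
from typing import Dict, Union


def parse_tagged_output(llm_output: str) -> Dict[str, Union[str, float]]:
    """Illustrative parser for the <S0>/<S1>/<S2> tags requested by the prompty above."""

    def between(tag: str) -> str:
        match = re.search(rf"<{tag}>(.*?)</{tag}>", llm_output, re.DOTALL)
        return match.group(1).strip() if match else ""

    return {
        "thought_chain": between("S0"),
        "explanation": between("S1"),
        "score": float(between("S2") or "nan"),
    }


sample = "<S0>Let's think step by step: ...</S0><S1>Fully addresses the query.</S1><S2>5</S2>"
print(parse_tagged_output(sample))  # {'thought_chain': ..., 'explanation': ..., 'score': 5.0}
```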
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._retrieval import RetrievalEvaluator
+
+ __all__ = [
+     "RetrievalEvaluator",
+ ]
@@ -0,0 +1,112 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ import logging
+ import os
+ from typing import Dict, List, Union
+ from typing_extensions import overload, override
+
+ from azure.ai.evaluation._evaluators._common._base_prompty_eval import PromptyEvaluatorBase
+ from azure.ai.evaluation._model_configurations import Conversation
+
+ logger = logging.getLogger(__name__)
+
+
+ class RetrievalEvaluator(PromptyEvaluatorBase[Union[str, float]]):
+     """
+     Evaluates the retrieval score for a given query and context, or a multi-turn conversation, including reasoning.
+
+     The retrieval measure assesses the AI system's performance in retrieving information
+     for additional context (e.g. a RAG scenario).
+
+     Retrieval scores range from 1 to 5, with 1 being the worst and 5 being the best.
+
+     High retrieval scores indicate that the AI system has successfully extracted and ranked
+     the most relevant information at the top, without introducing bias from external knowledge
+     and ignoring factual correctness. Conversely, low retrieval scores suggest that the AI system
+     has failed to surface the most relevant context chunks at the top of the list
+     and/or introduced bias and ignored factual correctness.
+
+     :param model_config: Configuration for the Azure OpenAI model.
+     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+         ~azure.ai.evaluation.OpenAIModelConfiguration]
+     :return: A function that evaluates and generates metrics for the "chat" scenario.
+     :rtype: Callable
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START retrieval_evaluator]
+             :end-before: [END retrieval_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a RetrievalEvaluator.
+
+     .. note::
+
+         To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+         To maintain backwards compatibility, the old key with the `gpt_` prefix is still present in the output;
+         however, it is recommended to use the new key moving forward, as the old key will be deprecated in the future.
+     """
+
+     _PROMPTY_FILE = "retrieval.prompty"
+     _RESULT_KEY = "retrieval"
+
+     id = "azureml://registries/azureml/models/Retrieval-Evaluator/versions/1"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     @override
+     def __init__(self, model_config):  # pylint: disable=super-init-not-called
+         current_dir = os.path.dirname(__file__)
+         prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
+         super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self._RESULT_KEY)
+
+     @overload
+     def __call__(
+         self,
+         *,
+         query: str,
+         context: str,
+     ) -> Dict[str, Union[str, float]]:
+         """Evaluates retrieval for a given query and context.
+
+         :keyword query: The query to be evaluated. Mutually exclusive with the `conversation` parameter.
+         :paramtype query: Optional[str]
+         :keyword context: The context to be evaluated. Mutually exclusive with the `conversation` parameter.
+         :paramtype context: Optional[str]
+         :return: The scores for the chat scenario.
+         :rtype: Dict[str, Union[str, float]]
+         """
+
+     @overload
+     def __call__(
+         self,
+         *,
+         conversation: Conversation,
+     ) -> Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]:
+         """Evaluates retrieval for a multi-turn conversation. If the conversation has more than one turn,
+         the evaluator will aggregate the results of each turn.
+
+         :keyword conversation: The conversation to be evaluated.
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The scores for the chat scenario.
+         :rtype: Dict[str, Union[float, Dict[str, List[float]]]]
+         """
+
+     @override
+     def __call__(self, *args, **kwargs):  # pylint: disable=docstring-missing-param
+         """Evaluates the retrieval score for a chat scenario. Accepts either a query and context for a single
+         evaluation, or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+         the evaluator will aggregate the results of each turn.
+
+         :keyword query: The query to be evaluated. Mutually exclusive with the `conversation` parameter.
+         :paramtype query: Optional[str]
+         :keyword context: The context to be evaluated. Mutually exclusive with the `conversation` parameter.
+         :paramtype context: Optional[str]
+         :keyword conversation: The conversation to be evaluated.
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The scores for the chat scenario.
+         :rtype: Dict[str, Union[float, Dict[str, List[Union[str, float]]]]]
+         """
+         return super().__call__(*args, **kwargs)
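As with the other prompty-based evaluators, here is a hedged usage sketch for the `RetrievalEvaluator` above; the configuration dict shape and import path are assumptions, while the keyword arguments come from the overloads in the diff.

```python
from azure.ai.evaluation import RetrievalEvaluator  # assumed top-level re-export

model_config = {  # hypothetical Azure OpenAI configuration; key names assumed
    "azure_endpoint": "https://<your-resource>.openai.azure.com",
    "api_key": "<your-api-key>",
    "azure_deployment": "<your-deployment>",
}

retrieval = RetrievalEvaluator(model_config)

# Scores how well the retrieved context chunks serve the query (1-5).
result = retrieval(
    query="What are monocytes?",
    context=(
        "Monocytes are a type of white blood cell and part of the innate immune system. "
        "They are produced in the bone marrow and differentiate into macrophages or dendritic cells."
    ),
)
print(result)
```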
@@ -0,0 +1,93 @@
+ ---
+ name: Retrieval
+ description: Evaluates retrieval quality score for RAG scenario
+ model:
+   api: chat
+   parameters:
+     temperature: 0.0
+     max_tokens: 1600
+     top_p: 1.0
+     presence_penalty: 0
+     frequency_penalty: 0
+     response_format:
+       type: text
+
+ inputs:
+   query:
+     type: string
+   context:
+     type: string
+
+ ---
+ system:
+ # Instruction
+ ## Goal
+ ### You are an expert in evaluating the quality of a list of CONTEXT chunks from a query based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+ - **Definition**: You are given a definition of the retrieval quality that is being evaluated to help guide your Score.
+ - **Data**: Your input data include QUERY and CONTEXT.
+ - **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+ user:
+ # Definition
+ **Retrieval** refers to measuring how relevant the context chunks are to address a query and how the most relevant context chunks are surfaced at the top of the list. It emphasizes the extraction and ranking of the most relevant information at the top, without introducing bias from external knowledge and ignoring factual correctness. It assesses the relevance and effectiveness of the retrieved context chunks with respect to the query.
+
+ # Ratings
+ ## [Retrieval: 1] (Irrelevant Context, External Knowledge Bias)
+ **Definition:** The retrieved context chunks are not relevant to the query despite any conceptual similarities. There is no overlap between the query and the retrieved information, and no useful chunks appear in the results. They introduce external knowledge that isn't part of the retrieval documents.
+
+ **Examples:**
+ **Query:** what is kuchen?
+ **Context:** ["There's nothing like the taste of a cake you made in your own kitchen. Baking a cake is as simple as measuring ingredients, mixing them in the right order, and remembering to take the cake out of the oven before it burns.", "A steady 325-350 degrees is ideal when it comes to baking pound cake. Position the pan in the middle of the oven, and rotate it once, halfway through the baking time, as it bakes to account for any hot spots. "CHOCOLATE POUND CAKE. Cream butter, sugar ... and floured bundt pan, 10 inch pan or 2 (9x5x3 inch) loaf pans. Bake at ... pans. Bake until cake tester inserted in ... to drizzle down sides. 4. BUTTERMILK LEMON POUND CAKE."", "Pour batter into your pan(s) and place in the oven. Cook for 75 minutes, checking periodically. Some ovens cook unevenly or quickly -- if this describes yours, keep an eye on it. 1 If to be used for fancy ornamented cakes, bake 30 to 35 minutes in a dripping-pan. 2 Insert a skewer or toothpick to see if it's finished.", "As a general rule of thumb you can bake most cakes at 375 degrees Fahrenheit (which is 180 degrees Celsius) and check them after about 30 minutes and expect it to take at least 45 minutes.", "Till a toothpick inserted in the center of the cake comes out clean. Depends on the heat of your oven but start checking at about 45 minutes and when the cake is golden brown. sonnyboy · 8 years ago. Thumbs up.", "1 This results in a pound cake with maximum volume. 2 Be patient. Beat softened butter (and cream cheese or vegetable shortening) at medium speed with an electric mixer until creamy. 3 This can take from 1 to 7 minutes, depending on the power of your mixer."]
+
+ **Query:** What are the main economic impacts of global warming?
+ **Context:** ["Economic theories such as supply and demand explain how prices fluctuate in a free market.", "Global warming is caused by increased carbon dioxide levels, which affect the environment and the atmosphere.", "Political factors also play a role in economic decisions across nations."]
+
+ ## [Retrieval: 2] (Partially Relevant Context, Poor Ranking, External Knowledge Bias)
+ **Definition:** The context chunks are partially relevant to address the query but are mostly irrelevant, and external knowledge or LLM bias starts influencing the context chunks. The most relevant chunks are either missing or placed at the bottom.
+
+ **Examples:**
+ **Query:** what is rappelling
+ **Context:** ["5. Cancel. Rappelling is the process of coming down from a mountain that is usually done with two pieces of rope. Use a natural anchor or a set of bolts to rappel from with help from an experienced rock climber in this free video on rappelling techniques. Part of the Video Series: Rappelling & Rock Climbing.", "Abseiling (/ˈaebseɪl/ ˈæbseɪl /or/ ; ˈɑːpzaɪl From german, abseilen meaning to rope), down also called, rappelling is the controlled descent of a vertical, drop such as a rock, face using a. Rope climbers use this technique when a cliff or slope is too steep/and or dangerous to descend without. protection", "1. rappel - (mountaineering) a descent of a vertical cliff or wall made by using a doubled rope that is fixed to a higher point and wrapped around the body. abseil. mountain climbing, mountaineering-the activity of climbing a mountain. descent-the act of changing your location in a downward direction."]
+
+ **Query:** Describe the causes of the French Revolution.
+ **Context:** ["The French Revolution started due to economic disparity, leading to unrest among the lower classes.", "The Industrial Revolution also contributed to changes in society during the 18th century.", "Philosophers like Rousseau inspired revolutionary thinking, but the taxation system played a role as well."]
+
+ ## [Retrieval: 3] (Relevant Context Ranked Bottom)
+ **Definition:** The context chunks contain relevant information to address the query, but the most pertinent chunks are located at the bottom of the list.
+
+ **Examples:**
+ **Query:** what are monocytes
+ **Context:** ["Monocytes are produced by the bone marrow from precursors called monoblasts, bipotent cells that differentiated from hematopoietic stem cells. Monocytes circulate in the bloodstream for about one to three days and then typically move into tissues throughout the body. Monocytes which migrate from the bloodstream to other tissues will then differentiate into tissue resident macrophages or dendritic cells. Macrophages are responsible for protecting tissues from foreign substances, but are also suspected to be important in the formation of important organs like the heart and brain.", "Report Abuse. A high level of monocytes could mean a number of things. They're a type of phagocyte-a type of cell found in your blood that 'eats' many types of attacking bacteria and other microorganisms when it matures. High levels could mean that you have an infection as more develop to fight it.", "Our immune system has a key component called the white blood cells, of which there are several different kinds. Monocytes are a type of white blood cell that fights off bacteria, viruses and fungi. Monocytes are the biggest type of white blood cell in the immune system. Originally formed in the bone marrow, they are released into our blood and tissues. When certain germs enter the body, they quickly rush to the site for attack.", "Monocyte. Monocytes are produced by the bone marrow from stem cell precursors called monoblasts. Monocytes circulate in the bloodstream for about one to three days and then typically move into tissues throughout the body. They make up three to eight percent of the leukocytes in the blood. Monocyte under a light microscope (40x) from a peripheral blood smear surrounded by red blood cells. Monocytes are a type of white blood cell, part of the human body's immune system. They are usually identified in stained smears by their large two-lobed nucleus.", "A monocyte (pictured below) is a large type of white blood cell with one large, smooth, well-defined, indented, slightly folded, oval, kidney-shaped, or notched nucleus (the cell's control center). White blood cells help protect the body against diseases and fight infections.", "Monocytes are white blood cells that are common to the blood of all vertebrates and they help the immune system to function properly. There are a number of reasons for a high monocyte count, which can also be called monocytosis. Some of the reasons can include stress, viral fevers, inflammation and organ necrosis. A physician may order a monocyte blood count test to check for raised levels of monocytes. There are a number of reasons for this test, from a simple health check up to people suffering from heart attacks and leukemia. Complications with the blood and cancer are two other reasons that this test may be performed.", "Monocytes are considered the largest white blood cell. These cells are part of the innate immune system. Monocytes also play important roles in the immune function of the body. These cells are often found when doing a stained smear and appear large kidney shaped. Many of these are found in the spleen area.", "This is taken directly from-http://www.wisegeek.com/what-are-monocytes.htm#. Monocytes are a type of leukocyte or white blood cell which play a role in immune system function. Depending on a patient's level of health, monocytes make up between one and three percent of the total white blood cells in the body. For example, if monocytes are elevated because of an inflammation caused by a viral infection, the patient would be given medication to kill the virus and bring down the inflammation. Typically, when a monocyte count is requested, the lab will also run other tests on the blood to generate a complete picture.", "3D Rendering of a Monocyte. Monocytes are a type of white blood cells (leukocytes). They are the largest of all leukocytes. They are part of the innate immune system of vertebrates including all mammals (humans included), birds, reptiles, and fish. Monocytes which migrate from the bloodstream to other tissues will then differentiate into tissue resident macrophages or dendritic cells. Macrophages are responsible for protecting tissues from foreign substances, but are also suspected to be important in the formation of important organs like the heart and brain."]
+
+ **Query:** What were the key features of the Magna Carta?
+ **Context:** ["The Magna Carta influenced the legal system in Europe, especially in constitutional law.", "It was signed in 1215 by King John of England to limit the powers of the monarchy.", "The Magna Carta introduced principles like due process and habeas corpus, which are key features of modern legal systems."]
+
+ ## [Retrieval: 4] (Relevant Context Ranked Middle, No External Knowledge Bias and Factual Accuracy Ignored)
+ **Definition:** The context chunks fully address the query, but the most relevant chunk is ranked in the middle of the list. No external knowledge is used to influence the ranking of the chunks; the system only relies on the provided context. Factual accuracy remains out of scope for evaluation.
+
+ **Examples:**
+ **Query:** do game shows pay their contestants
+ **Context:** ["So, in the end, game show winners get some of the money that TV advertisers pay to the networks, who pay the show producers, who then pay the game show winners. Just in the same way that the actors, and crew of a show get paid. Game shows, like other programs, have costs to produce the programs—they have to pay for sets, cameras, talent (the hosts), and also prizes to contestants.", "(Valerie Macon/Getty Images). Oh, happy day! You're a contestant on a popular game show—The Price Is Right, let's say. You spin the wheel, you make the winning bid, and suddenly—ka-ching!—you've won the Lexus or the dishwasher or the lifetime supply of nail clippers.", "1 If you can use most of the prizes the show offers, such as a new car or trip, you may be content to appear on a game show that features material prizes. 2 If not, you should probably try out for a show where cash is the main prize. 3 In the United States, game show contestants must pay taxes on any prizes they win. 2. Meet the eligibility requirements. All game shows have certain eligibility requirements for their contestants. Generally, you must be at least 18 years of age, except for those shows that use child or teenage contestants, and you are allowed to appear on no more than 1 game show per year.", "Rating Newest Oldest. Best Answer: You don't always win the money amount on the front of your lectern when you are on a game show. As someone else said, 2nd place earns $2000 and 3rd place earns $1000 in Jeopardy! In any case, the prize money is paid out from the ad revenue that the show receives from sponsors. I think in this case Who Wants to be a Millionaire or Deal or No Deal is the best example of how shows can be successful while still paying the prize money. I feel this way because these shows have a potential, however small it may be, to pay out 1 million dollars to every contestant on the show. Here is the reality. Regardless of the show whether it be a game show or a drama, a network will receive money from commercial advertising based on the viewership. With this in mind a game show costs very little to actually air compared to a full production drama series, that's where the prize money comes from"]
+
+ ## [Retrieval: 5] (Highly Relevant, Well Ranked, No Bias Introduced)
+ **Definition:** The context chunks not only fully address the query, but also surface the most relevant chunks at the top of the list. The retrieval respects the internal context, avoids relying on any outside knowledge, and focuses solely on pulling the most useful content to the forefront, irrespective of the factual correctness of the information.
+
+ **Examples:**
+ **Query:** The smallest blood vessels in your body, where gas exchange occurs are called
+ **Context:** ["Gas exchange is the delivery of oxygen from the lungs to the bloodstream, and the elimination of carbon dioxide from the bloodstream to the lungs. It occurs in the lungs between the alveoli and a network of tiny blood vessels called capillaries, which are located in the walls of the alveoli. The walls of the alveoli actually share a membrane with the capillaries in which oxygen and carbon dioxide move freely between the respiratory system and the bloodstream.", "Arterioles branch into capillaries, the smallest of all blood vessels. Capillaries are the sites of nutrient and waste exchange between the blood and body cells. Capillaries are microscopic vessels that join the arterial system with the venous system.", "Arterioles are the smallest arteries and regulate blood flow into capillary beds through vasoconstriction and vasodilation. Capillaries are the smallest vessels and allow for exchange of substances between the blood and interstitial fluid. Continuous capillaries are most common and allow passage of fluids and small solutes. Fenestrated capillaries are more permeable to fluids and solutes than continuous capillaries.", "Tweet. The smallest blood vessels in the human body are capillaries. They are responsible for the absorption of oxygen into the blood stream and for removing the deoxygenated red blood cells for return to the heart and lungs for reoxygenation.", "2. Capillaries—these are the sites of gas exchange between the tissues. 3. Veins—these return oxygen poor blood to the heart, except for the vein that carries blood from the lungs. On the right is a diagram showing how the three connect. Notice the artery and vein are much larger than the capillaries.", "Gas exchange occurs in the capillaries which are the smallest blood vessels in the body. Each artery that comes from the heart is surrounded by capillaries so that they can take it to the various parts of the body."]
+
+
+ # Data
+ QUERY: {{query}}
+ CONTEXT: {{context}}
+
+
+ # Tasks
+ ## Please provide your assessment Score for the previous CONTEXT in relation to the QUERY based on the Definitions above. Your output should include the following information:
+ - **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+ - **Explanation**: a very short explanation of why you think the input Data should get that Score.
+ - **Score**: based on your previous analysis, provide your Score. The Score you give MUST be an integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+ ## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+ # Output
@@ -0,0 +1,10 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._rouge import RougeScoreEvaluator, RougeType
+
+ __all__ = [
+     "RougeScoreEvaluator",
+     "RougeType",
+ ]
@@ -0,0 +1,98 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from enum import Enum
+
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+
+ from azure.ai.evaluation._vendor.rouge_score import rouge_scorer
+
+
+ class RougeType(Enum):
+     """
+     Enumeration of ROUGE (Recall-Oriented Understudy for Gisting Evaluation) types.
+     """
+
+     ROUGE_1 = "rouge1"
+     """Overlap of unigrams (single words) between generated and reference text."""
+
+     ROUGE_2 = "rouge2"
+     """Overlap of bigrams (two consecutive words) between generated and reference text."""
+
+     ROUGE_3 = "rouge3"
+     """Overlap of trigrams (three consecutive words) between generated and reference text."""
+
+     ROUGE_4 = "rouge4"
+     """Overlap of four-grams (four consecutive words) between generated and reference text."""
+
+     ROUGE_5 = "rouge5"
+     """Overlap of five-grams (five consecutive words) between generated and reference text."""
+
+     ROUGE_L = "rougeL"
+     """Longest common subsequence (LCS) overlap between generated and reference text."""
+
+
+ class _AsyncRougeScoreEvaluator:
+     def __init__(self, rouge_type: RougeType):
+         self._rouge_type = rouge_type
+
+     async def __call__(self, *, ground_truth: str, response: str, **kwargs):
+         scorer = rouge_scorer.RougeScorer(rouge_types=[self._rouge_type.value])
+         metrics = scorer.score(ground_truth, response)[self._rouge_type.value]
+         return {
+             "rouge_precision": metrics.precision,
+             "rouge_recall": metrics.recall,
+             "rouge_f1_score": metrics.fmeasure,
+         }
+
+
+ class RougeScoreEvaluator:
+     """
+     Calculates the ROUGE score for a given response and ground truth.
+
+     The ROUGE score (Recall-Oriented Understudy for Gisting Evaluation) evaluates the similarity between the
+     generated text and reference text based on n-gram overlap, including ROUGE-N (unigram, bigram, etc.) and
+     ROUGE-L (longest common subsequence). It calculates precision, recall, and F1 scores to capture how well
+     the generated text matches the reference text. ROUGE type options are "rouge1" (unigram overlap), "rouge2"
+     (bigram overlap), "rouge3" (trigram overlap), "rouge4" (4-gram overlap), "rouge5" (5-gram overlap), and
+     "rougeL" (longest common subsequence overlap).
+
+     Use the ROUGE score when you need a robust evaluation metric for text summarization, machine translation, and
+     other natural language processing tasks, especially when focusing on recall and the ability to capture relevant
+     information from the reference text.
+
+     ROUGE scores range from 0 to 1, with higher scores indicating better quality.
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START rouge_score_evaluator]
+             :end-before: [END rouge_score_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a RougeScoreEvaluator with a four-gram rouge type.
+     """
+
+     id = "azureml://registries/azureml/models/Rouge-Score-Evaluator/versions/3"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     def __init__(self, rouge_type: RougeType):
+         self._async_evaluator = _AsyncRougeScoreEvaluator(rouge_type)
+
+     def __call__(self, *, ground_truth: str, response: str, **kwargs):
+         """
+         Evaluate the ROUGE score between the response and the ground truth.
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword ground_truth: The ground truth to be compared against.
+         :paramtype ground_truth: str
+         :return: The ROUGE score.
+         :rtype: Dict[str, float]
+         """
+         return async_run_allowing_running_loop(
+             self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
+         )
+
+     def _to_async(self):
+         return self._async_evaluator
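To close, a short usage sketch for the `RougeScoreEvaluator` above. Unlike the prompty-based evaluators it needs no model configuration, and the output keys come straight from `_AsyncRougeScoreEvaluator` in the diff; only the top-level import path is an assumption.

```python
from azure.ai.evaluation import RougeScoreEvaluator, RougeType  # assumed top-level re-export

# ROUGE is a purely lexical metric, so no model configuration is required.
rouge = RougeScoreEvaluator(rouge_type=RougeType.ROUGE_4)

scores = rouge(
    response="The quick brown fox jumps over the lazy dog.",
    ground_truth="A quick brown fox jumped over the lazy dog.",
)
print(scores)  # {"rouge_precision": ..., "rouge_recall": ..., "rouge_f1_score": ...}
```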