azure-ai-evaluation 1.0.0b4__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (83)
  1. azure/ai/evaluation/__init__.py +22 -0
  2. azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +4 -0
  3. azure/ai/evaluation/_common/constants.py +5 -0
  4. azure/ai/evaluation/_common/math.py +73 -2
  5. azure/ai/evaluation/_common/rai_service.py +250 -62
  6. azure/ai/evaluation/_common/utils.py +196 -23
  7. azure/ai/evaluation/_constants.py +7 -6
  8. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
  9. azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +13 -4
  10. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +19 -6
  11. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +46 -0
  12. azure/ai/evaluation/_evaluate/_eval_run.py +55 -14
  13. azure/ai/evaluation/_evaluate/_evaluate.py +312 -228
  14. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +7 -6
  15. azure/ai/evaluation/_evaluate/_utils.py +46 -11
  16. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +17 -18
  17. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +67 -31
  18. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
  19. azure/ai/evaluation/_evaluators/_common/_base_eval.py +37 -24
  20. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +21 -9
  21. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +52 -16
  22. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +91 -48
  23. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +100 -26
  24. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +94 -26
  25. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +96 -26
  26. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +97 -26
  27. azure/ai/evaluation/_evaluators/_eci/_eci.py +31 -4
  28. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +20 -13
  29. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +67 -36
  30. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
  31. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +14 -16
  32. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +106 -34
  33. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
  34. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
  35. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +20 -27
  36. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
  37. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +132 -0
  38. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +55 -0
  39. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +100 -0
  40. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +124 -0
  41. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +100 -0
  42. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +100 -0
  43. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +100 -0
  44. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +87 -31
  45. azure/ai/evaluation/_evaluators/_qa/_qa.py +23 -31
  46. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +72 -36
  47. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
  48. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +83 -125
  49. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
  50. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +26 -27
  51. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  52. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +148 -0
  53. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +37 -28
  54. azure/ai/evaluation/_evaluators/_xpia/xpia.py +94 -33
  55. azure/ai/evaluation/_exceptions.py +19 -0
  56. azure/ai/evaluation/_model_configurations.py +83 -15
  57. azure/ai/evaluation/_version.py +1 -1
  58. azure/ai/evaluation/simulator/__init__.py +2 -1
  59. azure/ai/evaluation/simulator/_adversarial_scenario.py +20 -1
  60. azure/ai/evaluation/simulator/_adversarial_simulator.py +29 -35
  61. azure/ai/evaluation/simulator/_constants.py +11 -1
  62. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  63. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  64. azure/ai/evaluation/simulator/_direct_attack_simulator.py +17 -9
  65. azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
  66. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +22 -1
  67. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +90 -35
  68. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +4 -2
  69. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +8 -4
  70. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -4
  71. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -1
  72. azure/ai/evaluation/simulator/_simulator.py +165 -105
  73. azure/ai/evaluation/simulator/_utils.py +31 -13
  74. azure_ai_evaluation-1.0.1.dist-info/METADATA +600 -0
  75. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/NOTICE.txt +20 -0
  76. azure_ai_evaluation-1.0.1.dist-info/RECORD +119 -0
  77. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/WHEEL +1 -1
  78. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -322
  79. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
  80. azure_ai_evaluation-1.0.0b4.dist-info/METADATA +0 -535
  81. azure_ai_evaluation-1.0.0b4.dist-info/RECORD +0 -106
  82. /azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +0 -0
  83. {azure_ai_evaluation-1.0.0b4.dist-info → azure_ai_evaluation-1.0.1.dist-info}/top_level.txt +0 -0
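
The list above shows several new modules (the multimodal content-safety evaluators, the service-based groundedness evaluator) and the rename of _batch_run_client to _batch_run. For orientation only, a small sketch follows for probing what an installed 1.0.1 wheel actually exposes; the symbol names probed are taken from the file list and hunks below, and the top-level re-exports are an assumption, not part of this diff.

# Non-authoritative sketch: probe the installed package (pip install azure-ai-evaluation==1.0.1)
# for symbols this diff introduces. hasattr() is used so nothing is assumed to exist.
import importlib

pkg = importlib.import_module("azure.ai.evaluation")
print("version:", getattr(pkg, "__version__", getattr(pkg, "VERSION", "unknown")))

for name in ("GroundednessProEvaluator", "RetrievalEvaluator", "RougeScoreEvaluator", "SimilarityEvaluator"):
    print(f"{name} exported from package root:", hasattr(pkg, name))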
azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty
@@ -1,10 +1,11 @@
  ---
  name: Retrieval
- description: Evaluates retrieval score for Chat scenario
+ description: Evaluates retrieval quality score for RAG scenario
  model:
    api: chat
    parameters:
      temperature: 0.0
+     max_tokens: 1600
      top_p: 1.0
      presence_penalty: 0
      frequency_penalty: 0
@@ -14,30 +15,79 @@ model:
  inputs:
    query:
      type: string
-   history:
-     type: string
-   documents:
+   context:
      type: string

  ---
  system:
- A chat history between user and bot is shown below
- A list of documents is shown below in json format, and each document has one unique id.
- These listed documents are used as context to answer the given question.
- The task is to score the relevance between the documents and the potential answer to the given question in the range of 1 to 5.
- 1 means none of the documents is relevant to the question at all. 5 means either one of the document or combination of a few documents is ideal for answering the given question.
- Think through step by step:
- - Summarize each given document first
- - Determine the underlying intent of the given question, when the question is ambiguous, refer to the given chat history
- - Measure how suitable each document to the given question, list the document id and the corresponding relevance score.
- - Summarize the overall relevance of given list of documents to the given question after # Overall Reason, note that the answer to the question can solely from single document or a combination of multiple documents.
- - Finally, output "# Result" followed by a score from 1 to 5.
-
- # Question
- {{ query }}
- # Chat History
- {{ history }}
- # Documents
- ===BEGIN RETRIEVED DOCUMENTS===
- {{ documents }}
- ===END RETRIEVED DOCUMENTS===
+ # Instruction
+ ## Goal
+ ### You are an expert in evaluating the quality of a list of CONTEXT chunks from a query based on provided definition and data. Your goal will involve answering the questions below using the information provided.
+ - **Definition**: You are given a definition of the retrieval quality that is being evaluated to help guide your Score.
+ - **Data**: Your input data include QUERY and CONTEXT.
+ - **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
+
+ user:
+ # Definition
+ **Retrieval** refers to measuring how relevant the context chunks are to address a query and how the most relevant context chunks are surfaced at the top of the list. It emphasizes the extraction and ranking of the most relevant information at the top, without introducing bias from external knowledge and ignoring factual correctness. It assesses the relevance and effectiveness of the retrieved context chunks with respect to the query.
+
+ # Ratings
+ ## [Retrieval: 1] (Irrelevant Context, External Knowledge Bias)
+ **Definition:** The retrieved context chunks are not relevant to the query despite any conceptual similarities. There is no overlap between the query and the retrieved information, and no useful chunks appear in the results. They introduce external knowledge that isn't part of the retrieval documents.
+
+ **Examples:**
+ **Query:** what is kuchen?
+ **Context:** ["There's nothing like the taste of a cake you made in your own kitchen. Baking a cake is as simple as measuring ingredients, mixing them in the right order, and remembering to take the cake out of the oven before it burns.", "A steady 325-350 degrees is ideal when it comes to baking pound cake. Position the pan in the middle of the oven, and rotate it once, halfway through the baking time, as it bakes to account for any hot spots. "CHOCOLATE POUND CAKE. Cream butter, sugar ... and floured bundt pan, 10 inch pan or 2 (9x5x3 inch) loaf pans. Bake at ... pans. Bake until cake tester inserted in ... to drizzle down sides. 4. BUTTERMILK LEMON POUND CAKE."", "Pour batter into your pan(s) and place in the oven. Cook for 75 minutes, checking periodically. Some ovens cook unevenly or quickly -- if this describes yours, keep an eye on it. 1 If to be used for fancy ornamented cakes, bake 30 to 35 minutes in a dripping-pan. 2 Insert a skewer or toothpick to see if it's finished.", "As a general rule of thumb you can bake most cakes at 375 degrees Fahrenheit (which is 180 degrees Celsius) and check them after about 30 minutes and expect it to take at least 45 minutes.", "Till a toothpick inserted in the center of the cake comes out clean. Depends on the heat of your oven but start checking at about 45 minutes and when the cake is golden brown. sonnyboy · 8 years ago. Thumbs up.", "1 This results in a pound cake with maximum volume. 2 Be patient. Beat softened butter (and cream cheese or vegetable shortening) at medium speed with an electric mixer until creamy. 3 This can take from 1 to 7 minutes, depending on the power of your mixer."]
+
+ **Query:** What are the main economic impacts of global warming?
+ **Context:** ["Economic theories such as supply and demand explain how prices fluctuate in a free market.", "Global warming is caused by increased carbon dioxide levels, which affect the environment and the atmosphere.", "Political factors also play a role in economic decisions across nations."]
+
+ ## [Retrieval: 2] (Partially Relevant Context, Poor Ranking, External Knowledge Bias)
+ **Definition:** The context chunks are partially relevant to address the query but are mostly irrelevant, and external knowledge or LLM bias starts influencing the context chunks. The most relevant chunks are either missing or placed at the bottom.
+
+ **Examples:**
+ **Query:** what is rappelling
+ **Context:** ["5. Cancel. Rappelling is the process of coming down from a mountain that is usually done with two pieces of rope. Use a natural anchor or a set of bolts to rappel from with help from an experienced rock climber in this free video on rappelling techniques. Part of the Video Series: Rappelling & Rock Climbing.", "Abseiling (/ˈaebseɪl/ ˈæbseɪl /or/ ; ˈɑːpzaɪl From german, abseilen meaning to rope), down also called, rappelling is the controlled descent of a vertical, drop such as a rock, face using a. Rope climbers use this technique when a cliff or slope is too steep/and or dangerous to descend without. protection", "1. rappel - (mountaineering) a descent of a vertical cliff or wall made by using a doubled rope that is fixed to a higher point and wrapped around the body. abseil. mountain climbing, mountaineering-the activity of climbing a mountain. descent-the act of changing your location in a downward direction."]
+
+ **Query:** Describe the causes of the French Revolution.
+ **Context:** ["The French Revolution started due to economic disparity, leading to unrest among the lower classes.", "The Industrial Revolution also contributed to changes in society during the 18th century.", "Philosophers like Rousseau inspired revolutionary thinking, but the taxation system played a role as well."]
+
+ ## [Retrieval: 3] (Relevant Context Ranked Bottom)
+ **Definition:** The context chunks contain relevant information to address the query, but the most pertinent chunks are located at the bottom of the list.
+
+ **Examples:**
+ **Query:** what are monocytes
+ **Context:** ["Monocytes are produced by the bone marrow from precursors called monoblasts, bipotent cells that differentiated from hematopoietic stem cells. Monocytes circulate in the bloodstream for about one to three days and then typically move into tissues throughout the body. Monocytes which migrate from the bloodstream to other tissues will then differentiate into tissue resident macrophages or dendritic cells. Macrophages are responsible for protecting tissues from foreign substances, but are also suspected to be important in the formation of important organs like the heart and brain.", "Report Abuse. A high level of monocytes could mean a number of things. They're a type of phagocyte-a type of cell found in your blood that 'eats' many types of attacking bacteria and other microorganisms when it matures. High levels could mean that you have an infection as more develop to fight it.", "Our immune system has a key component called the white blood cells, of which there are several different kinds. Monocytes are a type of white blood cell that fights off bacteria, viruses and fungi. Monocytes are the biggest type of white blood cell in the immune system. Originally formed in the bone marrow, they are released into our blood and tissues. When certain germs enter the body, they quickly rush to the site for attack.", "Monocyte. Monocytes are produced by the bone marrow from stem cell precursors called monoblasts. Monocytes circulate in the bloodstream for about one to three days and then typically move into tissues throughout the body. They make up three to eight percent of the leukocytes in the blood. Monocyte under a light microscope (40x) from a peripheral blood smear surrounded by red blood cells. Monocytes are a type of white blood cell, part of the human body's immune system. They are usually identified in stained smears by their large two-lobed nucleus.", "A monocyte (pictured below) is a large type of white blood cell with one large, smooth, well-defined, indented, slightly folded, oval, kidney-shaped, or notched nucleus (the cell's control center). White blood cells help protect the body against diseases and fight infections.", "Monocytes are white blood cells that are common to the blood of all vertebrates and they help the immune system to function properly. There are a number of reasons for a high monocyte count, which can also be called monocytosis. Some of the reasons can include stress, viral fevers, inflammation and organ necrosis. A physician may order a monocyte blood count test to check for raised levels of monocytes. There are a number of reasons for this test, from a simple health check up to people suffering from heart attacks and leukemia. Complications with the blood and cancer are two other reasons that this test may be performed.", "Monocytes are considered the largest white blood cell. These cells are part of the innate immune system. Monocytes also play important roles in the immune function of the body. These cells are often found when doing a stained smear and appear large kidney shaped. Many of these are found in the spleen area.", "This is taken directly from-http://www.wisegeek.com/what-are-monocytes.htm#. Monocytes are a type of leukocyte or white blood cell which play a role in immune system function. Depending on a patient's level of health, monocytes make up between one and three percent of the total white blood cells in the body. 
For example, if monocytes are elevated because of an inflammation caused by a viral infection, the patient would be given medication to kill the virus and bring down the inflammation. Typically, when a monocyte count is requested, the lab will also run other tests on the blood to generate a complete picture.", "3D Rendering of a Monocyte. Monocytes are a type of white blood cells (leukocytes). They are the largest of all leukocytes. They are part of the innate immune system of vertebrates including all mammals (humans included), birds, reptiles, and fish. Monocytes which migrate from the bloodstream to other tissues will then differentiate into tissue resident macrophages or dendritic cells. Macrophages are responsible for protecting tissues from foreign substances, but are also suspected to be important in the formation of important organs like the heart and brain."]
+
+ **Query:** What were the key features of the Magna Carta?
+ **Context:** ["The Magna Carta influenced the legal system in Europe, especially in constitutional law.", "It was signed in 1215 by King John of England to limit the powers of the monarchy.", "The Magna Carta introduced principles like due process and habeas corpus, which are key features of modern legal systems."]
+
+ ## [Retrieval: 4] (Relevant Context Ranked Middle, No External Knowledge Bias and Factual Accuracy Ignored)
+ **Definition:** The context chunks fully address the query, but the most relevant chunk is ranked in the middle of the list. No external knowledge is used to influence the ranking of the chunks; the system only relies on the provided context. Factual accuracy remains out of scope for evaluation.
+
+ **Examples:**
+ **Query:** do game shows pay their contestants
+ **Context:** ["So, in the end, game show winners get some of the money that TV advertisers pay to the networks, who pay the show producers, who then pay the game show winners. Just in the same way that the actors, and crew of a show get paid. Game shows, like other programs, have costs to produce the programs—they have to pay for sets, cameras, talent (the hosts), and also prizes to contestants.", "(Valerie Macon/Getty Images). Oh, happy day! You're a contestant on a popular game show—The Price Is Right, let's say. You spin the wheel, you make the winning bid, and suddenly—ka-ching!—you've won the Lexus or the dishwasher or the lifetime supply of nail clippers.", "1 If you can use most of the prizes the show offers, such as a new car or trip, you may be content to appear on a game show that features material prizes. 2 If not, you should probably try out for a show where cash is the main prize. 3 In the United States, game show contestants must pay taxes on any prizes they win. 2. Meet the eligibility requirements. All game shows have certain eligibility requirements for their contestants. Generally, you must be at least 18 years of age, except for those shows that use child or teenage contestants, and you are allowed to appear on no more than 1 game show per year.", "Rating Newest Oldest. Best Answer: You don't always win the money amount on the front of your lectern when you are on a game show. As someone else said, 2nd place earns $2000 and 3rd place earns $1000 in Jeopardy! In any case, the prize money is paid out from the ad revenue that the show receives from sponsors. I think in this case Who Wants to be a Millionaire or Deal or No Deal is the best example of how shows can be successful while still paying the prize money. I feel this way because these shows have a potential, however small it may be, to pay out 1 million dollars to every contestant on the show. Here is the reality. Regardless of the show whether it be a game show or a drama, a network will receive money from commercial advertising based on the viewership. With this in mind a game show costs very little to actually air compared to a full production drama series, that's where the prize money comes from"]
+
+ ## [Retrieval: 5] (Highly Relevant, Well Ranked, No Bias Introduced)
+ **Definition:** The context chunks not only fully address the query, but also surface the most relevant chunks at the top of the list. The retrieval respects the internal context, avoids relying on any outside knowledge, and focuses solely on pulling the most useful content to the forefront, irrespective of the factual correctness of the information.
+
+ **Examples:**
+ **Query:** The smallest blood vessels in your body, where gas exchange occurs are called
+ **Context:** ["Gas exchange is the delivery of oxygen from the lungs to the bloodstream, and the elimination of carbon dioxide from the bloodstream to the lungs. It occurs in the lungs between the alveoli and a network of tiny blood vessels called capillaries, which are located in the walls of the alveoli. The walls of the alveoli actually share a membrane with the capillaries in which oxygen and carbon dioxide move freely between the respiratory system and the bloodstream.", "Arterioles branch into capillaries, the smallest of all blood vessels. Capillaries are the sites of nutrient and waste exchange between the blood and body cells. Capillaries are microscopic vessels that join the arterial system with the venous system.", "Arterioles are the smallest arteries and regulate blood flow into capillary beds through vasoconstriction and vasodilation. Capillaries are the smallest vessels and allow for exchange of substances between the blood and interstitial fluid. Continuous capillaries are most common and allow passage of fluids and small solutes. Fenestrated capillaries are more permeable to fluids and solutes than continuous capillaries.", "Tweet. The smallest blood vessels in the human body are capillaries. They are responsible for the absorption of oxygen into the blood stream and for removing the deoxygenated red blood cells for return to the heart and lungs for reoxygenation.", "2. Capillaries—these are the sites of gas exchange between the tissues. 3. Veins—these return oxygen poor blood to the heart, except for the vein that carries blood from the lungs. On the right is a diagram showing how the three connect. Notice the artery and vein are much larger than the capillaries.", "Gas exchange occurs in the capillaries which are the smallest blood vessels in the body. Each artery that comes from the heart is surrounded by capillaries so that they can take it to the various parts of the body."]
+
+
+ # Data
+ QUERY: {{query}}
+ CONTEXT: {{context}}
+
+
+ # Tasks
+ ## Please provide your assessment Score for the previous CONTEXT in relation to the QUERY based on the Definitions above. Your output should include the following information:
+ - **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
+ - **Explanation**: a very short explanation of why you think the input Data should get that Score.
+ - **Score**: based on your previous analysis, provide your Score. The Score you give MUST be a integer score (i.e., "1", "2"...) based on the levels of the definitions.
+
+
+ ## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
+ # Output
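
The reworked prompty above replaces the old history/documents inputs with query and context and asks the model to return its verdict inside <S0>/<S1>/<S2> tags. For orientation only, a minimal usage sketch of the RetrievalEvaluator that consumes this prompty follows; the top-level import, the keyword names, and the output keys are assumptions based on the new prompty inputs, not something this diff confirms.

# Minimal sketch, not part of the diff: exercising the updated retrieval prompty
# through RetrievalEvaluator. Endpoint/deployment values are placeholders, and the
# output key names are assumed.
from azure.ai.evaluation import RetrievalEvaluator, AzureOpenAIModelConfiguration

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<your-resource>.openai.azure.com",  # placeholder
    api_key="<api-key>",                                        # placeholder
    azure_deployment="<deployment-name>",                       # placeholder
)

retrieval_eval = RetrievalEvaluator(model_config)
result = retrieval_eval(
    query="what are monocytes",
    context="Monocytes are a type of white blood cell produced in the bone marrow.",
)
print(result)  # expected to contain a 1-5 retrieval score plus a reason (key names assumed)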
azure/ai/evaluation/_evaluators/_rouge/_rouge.py
@@ -4,12 +4,11 @@
  from enum import Enum

  from promptflow._utils.async_utils import async_run_allowing_running_loop
- from azure.ai.evaluation._vendor.rouge_score import rouge_scorer

- from azure.core import CaseInsensitiveEnumMeta
+ from azure.ai.evaluation._vendor.rouge_score import rouge_scorer


- class RougeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ class RougeType(Enum):
      """
      Enumeration of ROUGE (Recall-Oriented Understudy for Gisting Evaluation) types.
      """
@@ -38,8 +37,8 @@ class _AsyncRougeScoreEvaluator:
          self._rouge_type = rouge_type

      async def __call__(self, *, ground_truth: str, response: str, **kwargs):
-         scorer = rouge_scorer.RougeScorer(rouge_types=[self._rouge_type])
-         metrics = scorer.score(ground_truth, response)[self._rouge_type]
+         scorer = rouge_scorer.RougeScorer(rouge_types=[self._rouge_type.value])
+         metrics = scorer.score(ground_truth, response)[self._rouge_type.value]
          return {
              "rouge_precision": metrics.precision,
              "rouge_recall": metrics.recall,
@@ -49,34 +48,34 @@ class _AsyncRougeScoreEvaluator:

  class RougeScoreEvaluator:
      """
-     Evaluator for computes the ROUGE scores between two strings.
-
-     ROUGE (Recall-Oriented Understudy for Gisting Evaluation) is a set of metrics used to evaluate automatic
-     summarization and machine translation. It measures the overlap between generated text and reference summaries.
-     ROUGE focuses on recall-oriented measures to assess how well the generated text covers the reference text. Text
-     summarization and document comparison are among optimal use cases for ROUGE, particularly in scenarios where text
-     coherence and relevance are critical.
+     Calculates the ROUGE score for a given response and ground truth.

-     **Usage**
+     The ROUGE score (Recall-Oriented Understudy for Gisting Evaluation) evaluates the similarity between the
+     generated text and reference text based on n-gram overlap, including ROUGE-N (unigram, bigram, etc.), and
+     ROUGE-L (longest common subsequence). It calculates precision, recall, and F1 scores to capture how well
+     the generated text matches the reference text. Rouge type options are "rouge1" (Unigram overlap), "rouge2"
+     (Bigram overlap), "rouge3" (Trigram overlap), "rouge4" (4-gram overlap), "rouge5" (5-gram overlap), "rougeL"
+     (L-graph overlap)

-     .. code-block:: python
+     Use the ROUGE score when you need a robust evaluation metric for text summarization, machine translation, and
+     other natural language processing tasks, especially when focusing on recall and the ability to capture relevant
+     information from the reference text.

-         eval_fn = RougeScoreEvaluator(rouge_type=RougeType.ROUGE_1)
-         result = eval_fn(
-             response="Tokyo is the capital of Japan.",
-             ground_truth="The capital of Japan is Tokyo.")
+     ROUGE scores range from 0 to 1, with higher scores indicating better quality.

-     **Output format**
+     .. admonition:: Example:

-     .. code-block:: python
-
-         {
-             "rouge_precision": 1.0,
-             "rouge_recall": 1.0,
-             "rouge_f1_score": 1.0
-         }
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START rouge_score_evaluator]
+             :end-before: [END rouge_score_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a RougeScoreEvaluator with a four-gram rouge type.
      """

+     id = "azureml://registries/azureml/models/Rouge-Score-Evaluator/versions/3"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
      def __init__(self, rouge_type: RougeType):
          self._async_evaluator = _AsyncRougeScoreEvaluator(rouge_type)

@@ -89,7 +88,7 @@ class RougeScoreEvaluator:
          :keyword ground_truth: The ground truth to be compared against.
          :paramtype ground_truth: str
          :return: The ROUGE score.
-         :rtype: dict
+         :rtype: Dict[str, float]
          """
          return async_run_allowing_running_loop(
              self._async_evaluator, ground_truth=ground_truth, response=response, **kwargs
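
Since RougeType is now a plain Enum and the scorer is handed self._rouge_type.value, the public call pattern is unchanged. The inline snippet removed from the docstring above still describes it; the sketch below is reconstructed from that removed snippet, assuming RougeScoreEvaluator and RougeType are re-exported from the package root.

# Sketch reconstructed from the docstring example removed above; the output keys
# (rouge_precision, rouge_recall, rouge_f1_score) are the ones returned by
# _AsyncRougeScoreEvaluator in this diff.
from azure.ai.evaluation import RougeScoreEvaluator, RougeType

eval_fn = RougeScoreEvaluator(rouge_type=RougeType.ROUGE_1)
result = eval_fn(
    response="Tokyo is the capital of Japan.",
    ground_truth="The capital of Japan is Tokyo.",
)
print(result)  # scores range from 0 to 1, higher is better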
azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py (new file)
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._service_groundedness import GroundednessProEvaluator
+
+ __all__ = [
+     "GroundednessProEvaluator",
+ ]
azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py (new file)
@@ -0,0 +1,148 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from typing import List, Union, Dict
+ from typing_extensions import overload, override
+
+ from azure.ai.evaluation._common._experimental import experimental
+ from azure.ai.evaluation._common.constants import EvaluationMetrics
+ from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
+ from azure.ai.evaluation._model_configurations import Conversation
+
+
+ @experimental
+ class GroundednessProEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
+     """
+     Evaluates service-based groundedness score for a given response, context, and query or a multi-turn conversation,
+     including reasoning.
+
+     The groundedness measure calls Azure AI Evaluation service to assess how well the AI-generated answer is grounded
+     in the source context. Even if the responses from LLM are factually correct, they'll be considered ungrounded if
+     they can't be verified against the provided sources (such as your input source or your database).
+
+     Service-based groundedness scores are boolean values, where True indicates that the response is grounded.
+
+     :param credential: The credential for connecting to Azure AI project. Required
+     :type credential: ~azure.core.credentials.TokenCredential
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains subscription id, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :param kwargs: Additional arguments to pass to the evaluator.
+     :type kwargs: Any
+
+     .. admonition:: Example:
+
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START groundedness_pro_evaluator]
+             :end-before: [END groundedness_pro_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a GroundednessProEvaluator with a query, response, and context.
+
+     .. note::
+
+         If this evaluator is supplied to the `evaluate` function, the aggregated metric
+         for the groundedness pro label will be "groundedness_pro_passing_rate".
+     """
+
+     id = "azureml://registries/azureml/models/Groundedness-Pro-Evaluator/versions/1"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     @override
+     def __init__(
+         self,
+         credential,
+         azure_ai_project,
+         **kwargs,
+     ):
+         self._passing_score = 5  # TODO update once the binarization PR is merged
+         self._output_prefix = "groundedness_pro"
+         super().__init__(
+             eval_metric=EvaluationMetrics.GROUNDEDNESS,
+             azure_ai_project=azure_ai_project,
+             credential=credential,
+             **kwargs,
+         )
+
+     @overload
+     def __call__(
+         self,
+         *,
+         response: str,
+         context: str,
+         query: str,
+     ) -> Dict[str, Union[str, bool]]:
+         """Evaluate groundedness for a given query/response/context
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword context: The context to be evaluated.
+         :paramtype context: str
+         :keyword query: The query to be evaluated.
+         :paramtype query: Optional[str]
+         :return: The relevance score.
+         :rtype: Dict[str, Union[str, bool]]
+         """
+
+     @overload
+     def __call__(
+         self,
+         *,
+         conversation: Conversation,
+     ) -> Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]:
+         """Evaluate groundedness for a conversation for a multi-turn evaluation. If the conversation has
+         more than one turn, the evaluator will aggregate the results of each turn, with the per-turn results
+         available in the output under the "evaluation_per_turn" key.
+
+         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+             key "messages", and potentially a global context under the key "context". Conversation turns are expected
+             to be dictionaries with keys "content", "role", and possibly "context".
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The relevance score.
+         :rtype: Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]
+         """
+
+     @override
+     def __call__(  # pylint: disable=docstring-missing-param
+         self,
+         *args,
+         **kwargs,
+     ):
+         """Evaluate groundedness. Accepts either a query, response and context for a single-turn evaluation, or a
+         or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+         the evaluator will aggregate the results of each turn, with the per-turn results available
+         in the output under the "evaluation_per_turn" key.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: Optional[str]
+         :keyword response: The response to be evaluated.
+         :paramtype response: Optional[str]
+         :keyword context: The context to be evaluated.
+         :paramtype context: Optional[str]
+         :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+             key "messages", and potentially a global context under the key "context". Conversation turns are expected
+             to be dictionaries with keys "content", "role", and possibly "context".
+         :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
+         :return: The relevance score.
+         :rtype: Union[Dict[str, Union[str, bool]], Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]]
+         """
+         return super().__call__(*args, **kwargs)
+
+     @override
+     async def _do_eval(self, eval_input: Dict):
+         """This evaluator has some unique post-processing that requires data that
+         the rai_service script is not currently built to handle. So we post-post-process
+         the result here to message it into the right form.
+
+         :param eval_input: The input to the evaluation function.
+         :type eval_input: Dict
+         :return: The evaluation result.
+         :rtype: Dict
+         """
+         result = await super()._do_eval(eval_input)
+         real_result = {}
+         real_result[self._output_prefix + "_label"] = (
+             result[EvaluationMetrics.GROUNDEDNESS + "_score"] >= self._passing_score
+         )
+         real_result[self._output_prefix + "_reason"] = result[EvaluationMetrics.GROUNDEDNESS + "_reason"]
+         return real_result
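
The overloads above imply a single-turn call with query/response/context (or a conversation), and _do_eval shows the output carries "groundedness_pro_label" and "groundedness_pro_reason". A hedged usage sketch follows; the top-level re-export and the AzureAIProject dictionary keys are assumptions based on the docstring's type references, and the endpoint values are placeholders.

# Hedged sketch of the call pattern implied by the overloads above; not part of the diff.
from azure.ai.evaluation import GroundednessProEvaluator
from azure.identity import DefaultAzureCredential

azure_ai_project = {
    "subscription_id": "<subscription-id>",     # placeholder
    "resource_group_name": "<resource-group>",  # placeholder
    "project_name": "<project-name>",           # placeholder
}

evaluator = GroundednessProEvaluator(
    credential=DefaultAzureCredential(),
    azure_ai_project=azure_ai_project,
)

result = evaluator(
    query="Which tent is the most waterproof?",
    response="The Alpine Explorer Tent is the most waterproof.",
    context="The Alpine Explorer Tent has a rainfly waterproof rating of 3000 mm.",
)
# Per _do_eval above, expect "groundedness_pro_label" (bool) and "groundedness_pro_reason" (str).
print(result)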
azure/ai/evaluation/_evaluators/_similarity/_similarity.py
@@ -5,13 +5,11 @@
  import math
  import os
  import re
- from typing import Union

  from promptflow._utils.async_utils import async_run_allowing_running_loop
  from promptflow.core import AsyncPrompty

  from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
- from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration

  from ..._common.utils import construct_prompty_model_config, validate_model_config

@@ -23,19 +21,19 @@ except ImportError:

  class _AsyncSimilarityEvaluator:
      # Constants must be defined within eval's directory to be save/loadable
-     PROMPTY_FILE = "similarity.prompty"
-     LLM_CALL_TIMEOUT = 600
-     DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+     _PROMPTY_FILE = "similarity.prompty"
+     _LLM_CALL_TIMEOUT = 600
+     _DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"

-     def __init__(self, model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration]):
+     def __init__(self, model_config: dict):
          prompty_model_config = construct_prompty_model_config(
-             model_config,
-             self.DEFAULT_OPEN_API_VERSION,
+             validate_model_config(model_config),
+             self._DEFAULT_OPEN_API_VERSION,
              USER_AGENT,
          )

          current_dir = os.path.dirname(__file__)
-         prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+         prompty_path = os.path.join(current_dir, self._PROMPTY_FILE)
          self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)

      async def __call__(self, *, query: str, response: str, ground_truth: str, **kwargs):
@@ -68,7 +66,7 @@ class _AsyncSimilarityEvaluator:

          # Run the evaluation flow
          llm_output = await self._flow(
-             query=query, response=response, ground_truth=ground_truth, timeout=self.LLM_CALL_TIMEOUT, **kwargs
+             query=query, response=response, ground_truth=ground_truth, timeout=self._LLM_CALL_TIMEOUT, **kwargs
          )

          score = math.nan
@@ -77,38 +75,49 @@ class _AsyncSimilarityEvaluator:
          if match:
              score = float(match.group())

-         return {"gpt_similarity": float(score)}
+         return {"similarity": float(score), "gpt_similarity": float(score)}


  class SimilarityEvaluator:
      """
-     Initialize a similarity evaluator configured for a specific Azure OpenAI model.
+     Evaluates similarity score for a given query, response, and ground truth or a multi-turn conversation.
+
+     The similarity measure evaluates the likeness between a ground truth sentence (or document) and the
+     AI model's generated prediction. This calculation involves creating sentence-level embeddings for both
+     the ground truth and the model's prediction, which are high-dimensional vector representations capturing
+     the semantic meaning and context of the sentences.
+
+     Use it when you want an objective evaluation of an AI model's performance, particularly in text generation
+     tasks where you have access to ground truth responses. Similarity enables you to assess the generated
+     text's semantic alignment with the desired content, helping to gauge the model's quality and accuracy.
+
+     Similarity scores range from 1 to 5, with 1 being the least similar and 5 being the most similar.

      :param model_config: Configuration for the Azure OpenAI model.
      :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
          ~azure.ai.evaluation.OpenAIModelConfiguration]

-     **Usage**
+     .. admonition:: Example:

-     .. code-block:: python
+         .. literalinclude:: ../samples/evaluation_samples_evaluate.py
+             :start-after: [START rouge_score_evaluator]
+             :end-before: [END rouge_score_evaluator]
+             :language: python
+             :dedent: 8
+             :caption: Initialize and call a RougeScoreEvaluator with a four-gram rouge type.

-         eval_fn = SimilarityEvaluator(model_config)
-         result = eval_fn(
-             query="What is the capital of Japan?",
-             response="The capital of Japan is Tokyo.",
-             ground_truth="Tokyo is Japan's capital.")
+     .. note::

-     **Output format**
-
-     .. code-block:: python
-
-         {
-             "gpt_similarity": 3.0
-         }
+         To align with our support of a diverse set of models, an output key without the `gpt_` prefix has been added.
+         To maintain backwards compatibility, the old key with the `gpt_` prefix is still be present in the output;
+         however, it is recommended to use the new key moving forward as the old key will be deprecated in the future.
      """

-     def __init__(self, model_config: dict):
-         self._async_evaluator = _AsyncSimilarityEvaluator(validate_model_config(model_config))
+     id = "azureml://registries/azureml/models/Similarity-Evaluator/versions/3"
+     """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+     def __init__(self, model_config):
+         self._async_evaluator = _AsyncSimilarityEvaluator(model_config)

      def __call__(self, *, query: str, response: str, ground_truth: str, **kwargs):
          """