azure-ai-evaluation 0.0.0b0__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of azure-ai-evaluation has been flagged as potentially problematic.

Files changed (100)
  1. azure/ai/evaluation/__init__.py +60 -0
  2. azure/ai/evaluation/_common/__init__.py +16 -0
  3. azure/ai/evaluation/_common/constants.py +65 -0
  4. azure/ai/evaluation/_common/rai_service.py +452 -0
  5. azure/ai/evaluation/_common/utils.py +87 -0
  6. azure/ai/evaluation/_constants.py +50 -0
  7. azure/ai/evaluation/_evaluate/__init__.py +3 -0
  8. azure/ai/evaluation/_evaluate/_batch_run_client/__init__.py +8 -0
  9. azure/ai/evaluation/_evaluate/_batch_run_client/batch_run_context.py +72 -0
  10. azure/ai/evaluation/_evaluate/_batch_run_client/code_client.py +150 -0
  11. azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +61 -0
  12. azure/ai/evaluation/_evaluate/_eval_run.py +494 -0
  13. azure/ai/evaluation/_evaluate/_evaluate.py +689 -0
  14. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +174 -0
  15. azure/ai/evaluation/_evaluate/_utils.py +237 -0
  16. azure/ai/evaluation/_evaluators/__init__.py +3 -0
  17. azure/ai/evaluation/_evaluators/_bleu/__init__.py +9 -0
  18. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +73 -0
  19. azure/ai/evaluation/_evaluators/_chat/__init__.py +9 -0
  20. azure/ai/evaluation/_evaluators/_chat/_chat.py +350 -0
  21. azure/ai/evaluation/_evaluators/_chat/retrieval/__init__.py +9 -0
  22. azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +163 -0
  23. azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +48 -0
  24. azure/ai/evaluation/_evaluators/_coherence/__init__.py +7 -0
  25. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +122 -0
  26. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +62 -0
  27. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +21 -0
  28. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +108 -0
  29. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +66 -0
  30. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +296 -0
  31. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +78 -0
  32. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +76 -0
  33. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +76 -0
  34. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +76 -0
  35. azure/ai/evaluation/_evaluators/_eci/__init__.py +0 -0
  36. azure/ai/evaluation/_evaluators/_eci/_eci.py +99 -0
  37. azure/ai/evaluation/_evaluators/_f1_score/__init__.py +9 -0
  38. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +141 -0
  39. azure/ai/evaluation/_evaluators/_fluency/__init__.py +9 -0
  40. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +122 -0
  41. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +61 -0
  42. azure/ai/evaluation/_evaluators/_gleu/__init__.py +9 -0
  43. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +71 -0
  44. azure/ai/evaluation/_evaluators/_groundedness/__init__.py +9 -0
  45. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +123 -0
  46. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +54 -0
  47. azure/ai/evaluation/_evaluators/_meteor/__init__.py +9 -0
  48. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +96 -0
  49. azure/ai/evaluation/_evaluators/_protected_material/__init__.py +5 -0
  50. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +104 -0
  51. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +5 -0
  52. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +104 -0
  53. azure/ai/evaluation/_evaluators/_qa/__init__.py +9 -0
  54. azure/ai/evaluation/_evaluators/_qa/_qa.py +111 -0
  55. azure/ai/evaluation/_evaluators/_relevance/__init__.py +9 -0
  56. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +131 -0
  57. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +69 -0
  58. azure/ai/evaluation/_evaluators/_rouge/__init__.py +10 -0
  59. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +98 -0
  60. azure/ai/evaluation/_evaluators/_similarity/__init__.py +9 -0
  61. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +130 -0
  62. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +71 -0
  63. azure/ai/evaluation/_evaluators/_xpia/__init__.py +5 -0
  64. azure/ai/evaluation/_evaluators/_xpia/xpia.py +140 -0
  65. azure/ai/evaluation/_exceptions.py +107 -0
  66. azure/ai/evaluation/_http_utils.py +395 -0
  67. azure/ai/evaluation/_model_configurations.py +27 -0
  68. azure/ai/evaluation/_user_agent.py +6 -0
  69. azure/ai/evaluation/_version.py +5 -0
  70. azure/ai/evaluation/py.typed +0 -0
  71. azure/ai/evaluation/simulator/__init__.py +15 -0
  72. azure/ai/evaluation/simulator/_adversarial_scenario.py +27 -0
  73. azure/ai/evaluation/simulator/_adversarial_simulator.py +450 -0
  74. azure/ai/evaluation/simulator/_constants.py +17 -0
  75. azure/ai/evaluation/simulator/_conversation/__init__.py +315 -0
  76. azure/ai/evaluation/simulator/_conversation/_conversation.py +178 -0
  77. azure/ai/evaluation/simulator/_conversation/constants.py +30 -0
  78. azure/ai/evaluation/simulator/_direct_attack_simulator.py +252 -0
  79. azure/ai/evaluation/simulator/_helpers/__init__.py +4 -0
  80. azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +17 -0
  81. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +93 -0
  82. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +207 -0
  83. azure/ai/evaluation/simulator/_model_tools/__init__.py +23 -0
  84. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +147 -0
  85. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +228 -0
  86. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +157 -0
  87. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +157 -0
  88. azure/ai/evaluation/simulator/_model_tools/models.py +616 -0
  89. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +69 -0
  90. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +36 -0
  91. azure/ai/evaluation/simulator/_tracing.py +92 -0
  92. azure/ai/evaluation/simulator/_utils.py +111 -0
  93. azure/ai/evaluation/simulator/simulator.py +579 -0
  94. azure_ai_evaluation-1.0.0b1.dist-info/METADATA +377 -0
  95. azure_ai_evaluation-1.0.0b1.dist-info/RECORD +97 -0
  96. {azure_ai_evaluation-0.0.0b0.dist-info → azure_ai_evaluation-1.0.0b1.dist-info}/WHEEL +1 -1
  97. azure_ai_evaluation-1.0.0b1.dist-info/top_level.txt +1 -0
  98. azure_ai_evaluation-0.0.0b0.dist-info/METADATA +0 -7
  99. azure_ai_evaluation-0.0.0b0.dist-info/RECORD +0 -4
  100. azure_ai_evaluation-0.0.0b0.dist-info/top_level.txt +0 -1
azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py
@@ -0,0 +1,78 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._common.constants import EvaluationMetrics
+ from azure.ai.evaluation._model_configurations import AzureAIProject
+
+ try:
+     from ._content_safety_base import ContentSafetyEvaluatorBase
+ except ImportError:
+     from _content_safety_base import ContentSafetyEvaluatorBase
+
+
+ class _AsyncHateUnfairnessEvaluator(ContentSafetyEvaluatorBase):
+     def __init__(self, azure_ai_project: dict, credential=None):
+         # Hate_fairness is the actual backend metric name. Which, uh, doesn't sound great.
+         # so invert the name.
+         super().__init__(
+             metric=EvaluationMetrics.HATE_FAIRNESS,
+             azure_ai_project=azure_ai_project,
+             credential=credential,
+         )
+
+     async def __call__(self, *, query: str, response: str, **kwargs):
+         return await super().__call__(query=query, response=response, **kwargs)
+
+
+ class HateUnfairnessEvaluator:
+     """
+     Initialize a hate-unfairness evaluator for hate unfairness score.
+
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains subscription id, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :param credential: The credential for connecting to Azure AI project.
+     :type credential: ~azure.core.credentials.TokenCredential
+
+     **Usage**
+
+     .. code-block:: python
+
+         azure_ai_project = {
+             "subscription_id": "<subscription_id>",
+             "resource_group_name": "<resource_group_name>",
+             "project_name": "<project_name>",
+         }
+         eval_fn = HateUnfairnessEvaluator(azure_ai_project)
+         result = eval_fn(query="What is the capital of France?", response="Paris.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "hate_unfairness": "High",
+             "hate_unfairness_score": 6.5,
+             "hate_unfairness_reason": "Some reason"
+         }
+     """
+
+     def __init__(self, azure_ai_project: dict, credential=None) -> None:
+         self._async_evaluator = _AsyncHateUnfairnessEvaluator(azure_ai_project, credential)
+
+     def __call__(self, *, query: str, response: str, **kwargs):
+         """
+         Evaluates hate unfairness content.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The hate unfairness score.
+         :rtype: dict
+         """
+         return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+     def _to_async(self):
+         return self._async_evaluator
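
Each content-safety evaluator in this release follows the same pattern: a private async implementation plus a thin synchronous wrapper that drives it through promptflow's async_run_allowing_running_loop. Below is a minimal usage sketch, not part of the diff; it assumes the evaluator is re-exported from the package root (the top-level __init__.py changes are not shown here) and uses azure-identity's DefaultAzureCredential as a stand-in for any TokenCredential.

    import asyncio

    from azure.identity import DefaultAzureCredential  # any TokenCredential works per the docstring
    from azure.ai.evaluation import HateUnfairnessEvaluator  # export path assumed

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }
    evaluator = HateUnfairnessEvaluator(azure_ai_project, credential=DefaultAzureCredential())

    # Synchronous call: the wrapper runs the async evaluator even if an event loop is already running.
    print(evaluator(query="What is the capital of France?", response="Paris."))

    # Inside async code, the underlying async evaluator can be awaited directly.
    async def main():
        result = await evaluator._to_async()(query="What is the capital of France?", response="Paris.")
        print(result)

    asyncio.run(main())
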
azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py
@@ -0,0 +1,76 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._common.constants import EvaluationMetrics
+ from azure.ai.evaluation._model_configurations import AzureAIProject
+
+ try:
+     from ._content_safety_base import ContentSafetyEvaluatorBase
+ except ImportError:
+     from _content_safety_base import ContentSafetyEvaluatorBase
+
+
+ class _AsyncSelfHarmEvaluator(ContentSafetyEvaluatorBase):
+     def __init__(self, azure_ai_project: dict, credential=None):
+         super().__init__(
+             metric=EvaluationMetrics.SELF_HARM,
+             azure_ai_project=azure_ai_project,
+             credential=credential,
+         )
+
+     async def __call__(self, *, query: str, response: str, **kwargs):
+         return await super().__call__(query=query, response=response, **kwargs)
+
+
+ class SelfHarmEvaluator:
+     """
+     Initialize a self harm evaluator for self harm score.
+
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains subscription id, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :param credential: The credential for connecting to Azure AI project.
+     :type credential: ~azure.core.credentials.TokenCredential
+
+     **Usage**
+
+     .. code-block:: python
+
+         azure_ai_project = {
+             "subscription_id": "<subscription_id>",
+             "resource_group_name": "<resource_group_name>",
+             "project_name": "<project_name>",
+         }
+         eval_fn = SelfHarmEvaluator(azure_ai_project)
+         result = eval_fn(query="What is the capital of France?", response="Paris.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "self_harm": "High",
+             "self_harm_score": 6.5,
+             "self_harm_reason": "Some reason"
+         }
+     """
+
+     def __init__(self, azure_ai_project: dict, credential=None):
+         self._async_evaluator = _AsyncSelfHarmEvaluator(azure_ai_project, credential)
+
+     def __call__(self, *, query: str, response: str, **kwargs):
+         """
+         Evaluates self harm content.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The self harm score.
+         :rtype: dict
+         """
+         return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+     def _to_async(self):
+         return self._async_evaluator
azure/ai/evaluation/_evaluators/_content_safety/_sexual.py
@@ -0,0 +1,76 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._common.constants import EvaluationMetrics
+ from azure.ai.evaluation._model_configurations import AzureAIProject
+
+ try:
+     from ._content_safety_base import ContentSafetyEvaluatorBase
+ except ImportError:
+     from _content_safety_base import ContentSafetyEvaluatorBase
+
+
+ class _AsyncSexualEvaluator(ContentSafetyEvaluatorBase):
+     def __init__(self, azure_ai_project: dict, credential=None):
+         super().__init__(
+             metric=EvaluationMetrics.SEXUAL,
+             azure_ai_project=azure_ai_project,
+             credential=credential,
+         )
+
+     async def __call__(self, *, query: str, response: str, **kwargs):
+         return await super().__call__(query=query, response=response, **kwargs)
+
+
+ class SexualEvaluator:
+     """
+     Initialize a sexual evaluator for sexual score.
+
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains subscription id, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :param credential: The credential for connecting to Azure AI project.
+     :type credential: ~azure.core.credentials.TokenCredential
+
+     **Usage**
+
+     .. code-block:: python
+
+         azure_ai_project = {
+             "subscription_id": "<subscription_id>",
+             "resource_group_name": "<resource_group_name>",
+             "project_name": "<project_name>",
+         }
+         eval_fn = SexualEvaluator(azure_ai_project)
+         result = eval_fn(query="What is the capital of France?", response="Paris.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "sexual": "High",
+             "sexual_score": 6.5,
+             "sexual_reason": "Some reason"
+         }
+     """
+
+     def __init__(self, azure_ai_project: dict, credential=None):
+         self._async_evaluator = _AsyncSexualEvaluator(azure_ai_project, credential)
+
+     def __call__(self, *, query: str, response: str, **kwargs):
+         """
+         Evaluates sexual content.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The sexual score.
+         :rtype: dict
+         """
+         return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+     def _to_async(self):
+         return self._async_evaluator
azure/ai/evaluation/_evaluators/_content_safety/_violence.py
@@ -0,0 +1,76 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._common.constants import EvaluationMetrics
+ from azure.ai.evaluation._model_configurations import AzureAIProject
+
+ try:
+     from ._content_safety_base import ContentSafetyEvaluatorBase
+ except ImportError:
+     from _content_safety_base import ContentSafetyEvaluatorBase
+
+
+ class _AsyncViolenceEvaluator(ContentSafetyEvaluatorBase):
+     def __init__(self, azure_ai_project: dict, credential=None):
+         super().__init__(
+             metric=EvaluationMetrics.VIOLENCE,
+             azure_ai_project=azure_ai_project,
+             credential=credential,
+         )
+
+     async def __call__(self, *, query: str, response: str, **kwargs):
+         return await super().__call__(query=query, response=response, **kwargs)
+
+
+ class ViolenceEvaluator:
+     """
+     Initialize a violence evaluator for violence score.
+
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains subscription id, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :param credential: The credential for connecting to Azure AI project.
+     :type credential: ~azure.core.credentials.TokenCredential
+
+     **Usage**
+
+     .. code-block:: python
+
+         azure_ai_project = {
+             "subscription_id": "<subscription_id>",
+             "resource_group_name": "<resource_group_name>",
+             "project_name": "<project_name>",
+         }
+         eval_fn = ViolenceEvaluator(azure_ai_project)
+         result = eval_fn(query="What is the capital of France?", response="Paris.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "violence": "High",
+             "violence_score": 6.5,
+             "violence_reason": "Some reason"
+         }
+     """
+
+     def __init__(self, azure_ai_project: dict, credential=None):
+         self._async_evaluator = _AsyncViolenceEvaluator(azure_ai_project, credential)
+
+     def __call__(self, *, query: str, response: str, **kwargs):
+         """
+         Evaluates violence content.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The violence score.
+         :rtype: dict
+         """
+         return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+     def _to_async(self):
+         return self._async_evaluator
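
ViolenceEvaluator closes out the four per-category content-safety evaluators added in this release. Because each call returns a dict keyed by its own metric, their results merge cleanly; the sketch below (not part of the diff) shows that pattern, again assuming the classes are re-exported from the package root and using DefaultAzureCredential as a stand-in for any TokenCredential.

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import (  # export paths assumed
        HateUnfairnessEvaluator,
        SelfHarmEvaluator,
        SexualEvaluator,
        ViolenceEvaluator,
    )

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }
    credential = DefaultAzureCredential()

    evaluators = [
        HateUnfairnessEvaluator(azure_ai_project, credential),
        SelfHarmEvaluator(azure_ai_project, credential),
        SexualEvaluator(azure_ai_project, credential),
        ViolenceEvaluator(azure_ai_project, credential),
    ]

    scores = {}
    for evaluator in evaluators:
        # Each call returns e.g. {"violence": "Low", "violence_score": 0, "violence_reason": "..."}
        scores.update(evaluator(query="What is the capital of France?", response="Paris."))

    print(scores)
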
azure/ai/evaluation/_evaluators/_eci/__init__.py
File without changes
azure/ai/evaluation/_evaluators/_eci/_eci.py
@@ -0,0 +1,99 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._common.constants import _InternalEvaluationMetrics
+ from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service
+ from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
+ from azure.ai.evaluation._model_configurations import AzureAIProject
+
+
+ class _AsyncECIEvaluator:
+     def __init__(self, azure_ai_project: dict, credential=None):
+         self._azure_ai_project = azure_ai_project
+         self._credential = credential
+
+     async def __call__(self, *, query: str, response: str, **kwargs):
+         # Validate inputs
+         # Raises value error if failed, so execution alone signifies success.
+         if not (query and query.strip() and query != "None") or not (
+             response and response.strip() and response != "None"
+         ):
+             msg = "Both 'query' and 'response' must be non-empty strings."
+             raise EvaluationException(
+                 message=msg,
+                 internal_message=msg,
+                 error_category=ErrorCategory.MISSING_FIELD,
+                 error_blame=ErrorBlame.USER_ERROR,
+                 error_target=ErrorTarget.ECI_EVALUATOR,
+             )
+
+         # Run score computation based on supplied metric.
+         result = await evaluate_with_rai_service(
+             metric_name=_InternalEvaluationMetrics.ECI,
+             query=query,
+             response=response,
+             project_scope=self._azure_ai_project,
+             credential=self._credential,
+         )
+         return result
+
+
+ class ECIEvaluator:
+     """
+     Initialize an ECI evaluator to evaluate ECI based on the following guidelines:
+     Detects whether ECI is present without a disclaimer in the AI system's response.
+     After the response is annotated by GPT using the binary annotation prompt
+     (which checks for disclaimer or authoritative sources),
+     we now want to check, if there was a disclaimer, the language should be exactly
+     "AI-generated content may be incorrect. If you are seeking ECI-related information, please go to Bing Search."
+     Outputs True or False with AI-generated reasoning.
+
+     :param azure_ai_project: The scope of the Azure AI project.
+         It contains subscription id, resource group, and project name.
+     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
+     :param credential: The credential for connecting to Azure AI project.
+     :type credential: ~azure.core.credentials.TokenCredential
+     :return: Whether or not ECI was found in the response without a disclaimer, with AI-generated reasoning
+     :rtype: Dict[str, str]
+
+     **Usage**
+
+     .. code-block:: python
+
+         azure_ai_project = {
+             "subscription_id": "<subscription_id>",
+             "resource_group_name": "<resource_group_name>",
+             "project_name": "<project_name>",
+         }
+         eval_fn = ECIEvaluator(azure_ai_project)
+         result = eval_fn(query="What is the capital of France?", response="Paris.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "eci_label": "False",
+             "eci_reason": "Some reason."
+         }
+     """
+
+     def __init__(self, azure_ai_project: dict, credential=None) -> None:
+         self._async_evaluator = _AsyncECIEvaluator(azure_ai_project, credential)
+
+     def __call__(self, *, query: str, response: str, **kwargs):
+         """
+         Evaluates ECI content.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The ECI result.
+         :rtype: dict
+         """
+         return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+     def _to_async(self):
+         return self._async_evaluator
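
The validation block above rejects empty or literal "None" inputs with an EvaluationException before any call to the RAI service is made. A small sketch of that behavior follows, assuming the internal module path shown in the file list (ECIEvaluator is an internal evaluator and may not be part of the public export surface):

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation._exceptions import EvaluationException
    from azure.ai.evaluation._evaluators._eci._eci import ECIEvaluator  # internal path, per the file list

    azure_ai_project = {
        "subscription_id": "<subscription_id>",
        "resource_group_name": "<resource_group_name>",
        "project_name": "<project_name>",
    }
    eval_fn = ECIEvaluator(azure_ai_project, credential=DefaultAzureCredential())

    try:
        eval_fn(query="", response="Paris.")  # empty query fails validation
    except EvaluationException as exc:
        # Raised as MISSING_FIELD / USER_ERROR before contacting the service.
        print(f"Rejected before calling the RAI service: {exc}")
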
azure/ai/evaluation/_evaluators/_f1_score/__init__.py
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._f1_score import F1ScoreEvaluator
+
+ __all__ = [
+     "F1ScoreEvaluator",
+ ]
azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py
@@ -0,0 +1,141 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from collections import Counter
+ from typing import List
+
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
+
+
+ class _AsyncF1ScoreEvaluator:
+     def __init__(self):
+         pass
+
+     async def __call__(self, *, response: str, ground_truth: str, **kwargs):
+         # Validate inputs
+         if not (response and response.strip() and response != "None") or not (
+             ground_truth and ground_truth.strip() and ground_truth != "None"
+         ):
+             msg = "Both 'response' and 'ground_truth' must be non-empty strings."
+             raise EvaluationException(
+                 message=msg,
+                 internal_message=msg,
+                 error_category=ErrorCategory.MISSING_FIELD,
+                 error_blame=ErrorBlame.USER_ERROR,
+                 error_target=ErrorTarget.F1_EVALUATOR,
+             )
+
+         # Run f1 score computation.
+         f1_result = self._compute_f1_score(response=response, ground_truth=ground_truth)
+
+         return {"f1_score": f1_result}
+
+     @classmethod
+     def _compute_f1_score(cls, response: str, ground_truth: str) -> str:
+         import re
+         import string
+
+         class QASplitTokenizer:
+             """Quality assurance tokenizer that splits text on whitespace."""
+
+             def __call__(self, line) -> List[str]:
+                 """Tokenizes an input line using split() on whitespace
+
+                 :param line: The input segment to be tokenized
+                 :type line: str
+                 :return: The tokenized segment
+                 :rtype: List[str]
+                 """
+
+                 return line.split()
+
+         def normalize_text(text: str) -> str:
+             """Lower text and remove punctuation, articles and extra whitespace.
+
+             :param text: The text to be normalized
+             :type text: str
+             :return: The normalized text
+             :rtype: str
+             """
+
+             def remove_articles(text):
+                 return re.sub(r"\b(a|an|the)\b", " ", text)
+
+             def white_space_fix(text):
+                 return " ".join(text.split())
+
+             def remove_punctuation(text):
+                 exclude = set(string.punctuation)
+                 return "".join(ch for ch in text if ch not in exclude)
+
+             def lower(text):
+                 return text.lower()
+
+             return white_space_fix(remove_articles(remove_punctuation(lower(text))))
+
+         prediction_tokens = normalize_text(response)
+         reference_tokens = normalize_text(ground_truth)
+         tokenizer = QASplitTokenizer()
+         prediction_tokens = tokenizer(prediction_tokens)
+         reference_tokens = tokenizer(reference_tokens)
+
+         common_tokens = Counter(prediction_tokens) & Counter(reference_tokens)
+         num_common_tokens = sum(common_tokens.values())
+
+         if num_common_tokens == 0:
+             f1 = 0.0
+         else:
+             precision = 1.0 * num_common_tokens / len(prediction_tokens)
+             recall = 1.0 * num_common_tokens / len(reference_tokens)
+
+             f1 = (2.0 * precision * recall) / (precision + recall)
+
+         return f1
+
+
+ class F1ScoreEvaluator:
+     """
+     Initialize a f1 score evaluator for calculating F1 score.
+
+     **Usage**
+
+     .. code-block:: python
+
+         eval_fn = F1ScoreEvaluator()
+         result = eval_fn(
+             response="The capital of Japan is Tokyo.",
+             ground_truth="Tokyo is Japan's capital, known for its blend of traditional culture \
+                 and technological advancements.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "f1_score": 0.42
+         }
+     """
+
+     def __init__(self):
+         self._async_evaluator = _AsyncF1ScoreEvaluator()
+
+     def __call__(self, *, response: str, ground_truth: str, **kwargs):
+         """
+         Evaluate F1 score.
+
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :keyword ground_truth: The ground truth to be evaluated.
+         :paramtype ground_truth: str
+         :return: The F1 score.
+         :rtype: dict
+         """
+
+         return async_run_allowing_running_loop(
+             self._async_evaluator, response=response, ground_truth=ground_truth, **kwargs
+         )
+
+     def _to_async(self):
+         return self._async_evaluator
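
The arithmetic inside _compute_f1_score is easiest to follow on the docstring's own example. The standalone sketch below reproduces it with hand-normalized token lists (lowercased, punctuation and articles stripped, split on whitespace); it is an illustration worked out for this diff, not code from the package.

    from collections import Counter

    # Normalized "The capital of Japan is Tokyo." -> 5 tokens
    prediction = ["capital", "of", "japan", "is", "tokyo"]
    # Normalized "Tokyo is Japan's capital, known for its blend of traditional culture
    # and technological advancements." -> 14 tokens ("japan's" becomes "japans")
    reference = ["tokyo", "is", "japans", "capital", "known", "for", "its", "blend", "of",
                 "traditional", "culture", "and", "technological", "advancements"]

    common = Counter(prediction) & Counter(reference)   # shared tokens: capital, of, is, tokyo
    num_common = sum(common.values())                   # 4

    precision = num_common / len(prediction)            # 4 / 5  = 0.80
    recall = num_common / len(reference)                # 4 / 14 ≈ 0.286
    f1 = 2 * precision * recall / (precision + recall)  # ≈ 0.42, matching the docstring output
    print(round(f1, 2))                                 # 0.42
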
azure/ai/evaluation/_evaluators/_fluency/__init__.py
@@ -0,0 +1,9 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ from ._fluency import FluencyEvaluator
+
+ __all__ = [
+     "FluencyEvaluator",
+ ]
azure/ai/evaluation/_evaluators/_fluency/_fluency.py
@@ -0,0 +1,122 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ import os
+ import re
+ from typing import Union
+
+ import numpy as np
+
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
+ from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
+ from promptflow.core import AsyncPrompty
+
+ from ..._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
+ from ..._common.utils import (
+     check_and_add_api_version_for_aoai_model_config,
+     check_and_add_user_agent_for_aoai_model_config,
+ )
+
+ try:
+     from ..._user_agent import USER_AGENT
+ except ImportError:
+     USER_AGENT = None
+
+
+ class _AsyncFluencyEvaluator:
+     # Constants must be defined within eval's directory to be save/loadable
+     PROMPTY_FILE = "fluency.prompty"
+     LLM_CALL_TIMEOUT = 600
+     DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+
+     def __init__(self, model_config: dict):
+         check_and_add_api_version_for_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
+
+         prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
+
+         # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
+         # https://github.com/encode/httpx/discussions/2959
+         prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
+
+         check_and_add_user_agent_for_aoai_model_config(
+             model_config,
+             prompty_model_config,
+             USER_AGENT,
+         )
+
+         current_dir = os.path.dirname(__file__)
+         prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+         self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
+
+     async def __call__(self, *, query: str, response: str, **kwargs):
+         # Validate input parameters
+         query = str(query or "")
+         response = str(response or "")
+
+         if not (query.strip() and response.strip()):
+             msg = "Both 'query' and 'response' must be non-empty strings."
+             raise EvaluationException(
+                 message=msg,
+                 internal_message=msg,
+                 error_category=ErrorCategory.MISSING_FIELD,
+                 error_blame=ErrorBlame.USER_ERROR,
+                 error_target=ErrorTarget.F1_EVALUATOR,
+             )
+
+         # Run the evaluation flow
+         llm_output = await self._flow(query=query, response=response, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
+
+         score = np.nan
+         if llm_output:
+             match = re.search(r"\d", llm_output)
+             if match:
+                 score = float(match.group())
+
+         return {"gpt_fluency": float(score)}
+
+
+ class FluencyEvaluator:
+     """
+     Initialize a fluency evaluator configured for a specific Azure OpenAI model.
+
+     :param model_config: Configuration for the Azure OpenAI model.
+     :type model_config: Union[~azure.ai.evaluation.AzureOpenAIModelConfiguration,
+         ~azure.ai.evaluation.OpenAIModelConfiguration]
+
+     **Usage**
+
+     .. code-block:: python
+
+         eval_fn = FluencyEvaluator(model_config)
+         result = eval_fn(
+             query="What is the capital of Japan?",
+             response="The capital of Japan is Tokyo.")
+
+     **Output format**
+
+     .. code-block:: python
+
+         {
+             "gpt_fluency": 4.0
+         }
+     """
+
+     def __init__(self, model_config: dict):
+         self._async_evaluator = _AsyncFluencyEvaluator(model_config)
+
+     def __call__(self, *, query: str, response: str, **kwargs):
+         """
+         Evaluate fluency.
+
+         :keyword query: The query to be evaluated.
+         :paramtype query: str
+         :keyword response: The response to be evaluated.
+         :paramtype response: str
+         :return: The fluency score.
+         :rtype: dict
+         """
+         return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
+
+     def _to_async(self):
+         return self._async_evaluator
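
Finally, a hedged sketch of constructing FluencyEvaluator. The exact keys accepted by AzureOpenAIModelConfiguration live in _model_configurations.py, which is not shown in this excerpt, so the endpoint/key/deployment names below are assumptions; if "api_version" is omitted, check_and_add_api_version_for_aoai_model_config fills in DEFAULT_OPEN_API_VERSION ("2024-02-15-preview").

    from azure.ai.evaluation import FluencyEvaluator  # export path assumed

    model_config = {
        "azure_endpoint": "https://<resource>.openai.azure.com",  # key name assumed
        "api_key": "<api-key>",                                   # key name assumed
        "azure_deployment": "<deployment-name>",                  # key name assumed
    }

    fluency = FluencyEvaluator(model_config)
    result = fluency(query="What is the capital of Japan?", response="The capital of Japan is Tokyo.")
    print(result)  # e.g. {"gpt_fluency": 4.0}; non-numeric LLM output yields NaN
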