azure-ai-evaluation 1.0.0b1__py3-none-any.whl → 1.0.0b3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of azure-ai-evaluation might be problematic.

Files changed (76)
  1. azure/ai/evaluation/__init__.py +4 -4
  2. azure/ai/evaluation/_common/rai_service.py +4 -4
  3. azure/ai/evaluation/_common/utils.py +40 -25
  4. azure/ai/evaluation/_constants.py +13 -0
  5. azure/ai/evaluation/_evaluate/_batch_run_client/batch_run_context.py +2 -1
  6. azure/ai/evaluation/_evaluate/_batch_run_client/code_client.py +39 -17
  7. azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +23 -13
  8. azure/ai/evaluation/_evaluate/_eval_run.py +38 -18
  9. azure/ai/evaluation/_evaluate/_evaluate.py +88 -63
  10. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +13 -8
  11. azure/ai/evaluation/_evaluate/_utils.py +29 -22
  12. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +1 -1
  13. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +34 -86
  14. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +0 -5
  15. azure/ai/evaluation/_evaluators/_common/__init__.py +13 -0
  16. azure/ai/evaluation/_evaluators/_common/_base_eval.py +302 -0
  17. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +79 -0
  18. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +99 -0
  19. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -4
  20. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +0 -2
  21. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +9 -4
  22. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +18 -41
  23. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +18 -39
  24. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +18 -39
  25. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +18 -39
  26. azure/ai/evaluation/_evaluators/_eci/_eci.py +18 -55
  27. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +2 -1
  28. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +29 -79
  29. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +0 -5
  30. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +1 -1
  31. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +33 -85
  32. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -5
  33. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +1 -0
  34. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +18 -65
  35. azure/ai/evaluation/_evaluators/_qa/_qa.py +3 -14
  36. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +34 -88
  37. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +0 -5
  38. azure/ai/evaluation/_evaluators/{_chat → _retrieval}/__init__.py +2 -2
  39. azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/_retrieval.py +17 -29
  40. azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/retrieval.prompty +0 -5
  41. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +3 -2
  42. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +5 -18
  43. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -5
  44. azure/ai/evaluation/_evaluators/_xpia/xpia.py +16 -91
  45. azure/ai/evaluation/_exceptions.py +0 -1
  46. azure/ai/evaluation/_http_utils.py +3 -3
  47. azure/ai/evaluation/_model_configurations.py +36 -8
  48. azure/ai/evaluation/_version.py +1 -1
  49. azure/ai/evaluation/simulator/__init__.py +1 -1
  50. azure/ai/evaluation/simulator/_adversarial_simulator.py +8 -6
  51. azure/ai/evaluation/simulator/_conversation/__init__.py +1 -1
  52. azure/ai/evaluation/simulator/_conversation/_conversation.py +16 -16
  53. azure/ai/evaluation/simulator/_direct_attack_simulator.py +6 -6
  54. azure/ai/evaluation/simulator/_helpers/__init__.py +3 -2
  55. azure/ai/evaluation/simulator/_helpers/_experimental.py +157 -0
  56. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +11 -29
  57. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +6 -6
  58. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +2 -3
  59. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +18 -11
  60. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +1 -1
  61. azure/ai/evaluation/simulator/_model_tools/models.py +9 -11
  62. azure/ai/evaluation/simulator/_prompty/__init__.py +0 -0
  63. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +2 -1
  64. azure/ai/evaluation/simulator/{simulator.py → _simulator.py} +166 -88
  65. azure/ai/evaluation/simulator/_tracing.py +21 -24
  66. azure/ai/evaluation/simulator/_utils.py +4 -1
  67. {azure_ai_evaluation-1.0.0b1.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/METADATA +144 -14
  68. azure_ai_evaluation-1.0.0b3.dist-info/RECORD +98 -0
  69. azure/ai/evaluation/_evaluators/_chat/_chat.py +0 -350
  70. azure/ai/evaluation/_evaluators/_chat/retrieval/__init__.py +0 -9
  71. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +0 -66
  72. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +0 -5
  73. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +0 -104
  74. azure_ai_evaluation-1.0.0b1.dist-info/RECORD +0 -97
  75. {azure_ai_evaluation-1.0.0b1.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/WHEEL +0 -0
  76. {azure_ai_evaluation-1.0.0b1.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/top_level.txt +0 -0
@@ -7,9 +7,8 @@ from typing import Dict, List
 
 import numpy as np
 from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
-from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
 
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 
 try:
     from ._hate_unfairness import HateUnfairnessEvaluator
@@ -165,7 +164,10 @@ class ContentSafetyChatEvaluator:
                 return score
             except Exception as e:  # pylint: disable=broad-exception-caught
                 logger.warning(
-                    f"Evaluator {evaluator.__class__.__name__} failed for turn {turn_num + 1} with exception: {e}"
+                    "Evaluator %s failed for turn %s with exception: %s",
+                    evaluator.__class__.__name__,
+                    turn_num + 1,
+                    e,
                 )
                 return {}
 
@@ -235,7 +237,10 @@ class ContentSafetyChatEvaluator:
                 )
 
             if "role" not in turn or "content" not in turn:
-                msg = f"Each turn in 'conversation' must have 'role' and 'content' keys. Turn number: {one_based_turn_num}"
+                msg = (
+                    "Each turn in 'conversation' must have 'role' and 'content' keys. "
+                    + f"Turn number: {one_based_turn_num}"
+                )
                 raise EvaluationException(
                     message=msg,
                     internal_message=msg,
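
The logger.warning change in the first hunk above replaces an f-string with logging's deferred %-style formatting, so the message is only interpolated when the warning record is actually emitted. A minimal standalone sketch of the two styles (the logger name and values are illustrative, not taken from the package):

    import logging

    logger = logging.getLogger("example")
    turn_num = 2
    err = ValueError("boom")

    # Eager: the f-string is built even if WARNING is filtered out.
    logger.warning(f"Evaluator FooEvaluator failed for turn {turn_num + 1} with exception: {err}")

    # Deferred: arguments are formatted only when the record is emitted.
    logger.warning("Evaluator %s failed for turn %s with exception: %s", "FooEvaluator", turn_num + 1, err)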
@@ -1,31 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 
-try:
-    from ._content_safety_base import ContentSafetyEvaluatorBase
-except ImportError:
-    from _content_safety_base import ContentSafetyEvaluatorBase
 
-
-class _AsyncHateUnfairnessEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, azure_ai_project: dict, credential=None):
-        # Hate_fairness is the actual backend metric name. Which, uh, doesn't sound great.
-        # so invert the name.
-        super().__init__(
-            metric=EvaluationMetrics.HATE_FAIRNESS,
-            azure_ai_project=azure_ai_project,
-            credential=credential,
-        )
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        return await super().__call__(query=query, response=response, **kwargs)
-
-
-class HateUnfairnessEvaluator:
+class HateUnfairnessEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a hate-unfairness evaluator for hate unfairness score.
 
@@ -33,7 +15,7 @@ class HateUnfairnessEvaluator:
         It contains subscription id, resource group, and project name.
     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
 
     **Usage**
 
@@ -58,21 +40,16 @@ class HateUnfairnessEvaluator:
     }
     """
 
-    def __init__(self, azure_ai_project: dict, credential=None) -> None:
-        self._async_evaluator = _AsyncHateUnfairnessEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates hate unfairness content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The hate unfairness score.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.HATE_FAIRNESS,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )
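
The same consolidation onto RaiServiceEvaluatorBase is applied to the self-harm, sexual, violence, and ECI evaluators below. For orientation, a hedged sketch of how a refactored safety evaluator might be constructed and called; the project values are placeholders, and the assumption that the public call surface still takes query/response keywords is based on the docstrings in this diff rather than on running the package:

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import HateUnfairnessEvaluator

    # Placeholder project scope; the docstring says it carries subscription id,
    # resource group, and project name.
    azure_ai_project = {
        "subscription_id": "<subscription-id>",
        "resource_group_name": "<resource-group>",
        "project_name": "<project-name>",
    }

    evaluator = HateUnfairnessEvaluator(
        azure_ai_project=azure_ai_project,
        credential=DefaultAzureCredential(),
    )

    # Single-turn call, mirroring the keyword surface of the removed wrapper's __call__.
    result = evaluator(query="What is the capital of France?", response="Paris.")
    print(result)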
@@ -1,29 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 
-try:
-    from ._content_safety_base import ContentSafetyEvaluatorBase
-except ImportError:
-    from _content_safety_base import ContentSafetyEvaluatorBase
 
-
-class _AsyncSelfHarmEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, azure_ai_project: dict, credential=None):
-        super().__init__(
-            metric=EvaluationMetrics.SELF_HARM,
-            azure_ai_project=azure_ai_project,
-            credential=credential,
-        )
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        return await super().__call__(query=query, response=response, **kwargs)
-
-
-class SelfHarmEvaluator:
+class SelfHarmEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a self harm evaluator for self harm score.
 
@@ -31,7 +15,7 @@ class SelfHarmEvaluator:
         It contains subscription id, resource group, and project name.
    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
 
    **Usage**
 
@@ -56,21 +40,16 @@ class SelfHarmEvaluator:
     }
     """
 
-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._async_evaluator = _AsyncSelfHarmEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates self harm content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The self harm score.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.SELF_HARM,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )
@@ -1,29 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 
-try:
-    from ._content_safety_base import ContentSafetyEvaluatorBase
-except ImportError:
-    from _content_safety_base import ContentSafetyEvaluatorBase
 
-
-class _AsyncSexualEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, azure_ai_project: dict, credential=None):
-        super().__init__(
-            metric=EvaluationMetrics.SEXUAL,
-            azure_ai_project=azure_ai_project,
-            credential=credential,
-        )
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        return await super().__call__(query=query, response=response, **kwargs)
-
-
-class SexualEvaluator:
+class SexualEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a sexual evaluator for sexual score.
 
@@ -31,7 +15,7 @@ class SexualEvaluator:
         It contains subscription id, resource group, and project name.
    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
 
    **Usage**
 
@@ -56,21 +40,16 @@ class SexualEvaluator:
     }
     """
 
-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._async_evaluator = _AsyncSexualEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates sexual content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The sexual score.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.SEXUAL,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )
@@ -1,29 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 
-try:
-    from ._content_safety_base import ContentSafetyEvaluatorBase
-except ImportError:
-    from _content_safety_base import ContentSafetyEvaluatorBase
 
-
-class _AsyncViolenceEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, azure_ai_project: dict, credential=None):
-        super().__init__(
-            metric=EvaluationMetrics.VIOLENCE,
-            azure_ai_project=azure_ai_project,
-            credential=credential,
-        )
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        return await super().__call__(query=query, response=response, **kwargs)
-
-
-class ViolenceEvaluator:
+class ViolenceEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a violence evaluator for violence score.
 
@@ -31,7 +15,7 @@ class ViolenceEvaluator:
         It contains subscription id, resource group, and project name.
    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
 
    **Usage**
 
@@ -56,21 +40,16 @@ class ViolenceEvaluator:
     }
     """
 
-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._async_evaluator = _AsyncViolenceEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates violence content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The violence score.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.VIOLENCE,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )
@@ -1,45 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import _InternalEvaluationMetrics
-from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service
-from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
 
 
-class _AsyncECIEvaluator:
-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._azure_ai_project = azure_ai_project
-        self._credential = credential
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        # Validate inputs
-        # Raises value error if failed, so execution alone signifies success.
-        if not (query and query.strip() and query != "None") or not (
-            response and response.strip() and response != "None"
-        ):
-            msg = "Both 'query' and 'response' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.ECI_EVALUATOR,
-            )
-
-        # Run score computation based on supplied metric.
-        result = await evaluate_with_rai_service(
-            metric_name=_InternalEvaluationMetrics.ECI,
-            query=query,
-            response=response,
-            project_scope=self._azure_ai_project,
-            credential=self._credential,
-        )
-        return result
-
-
-class ECIEvaluator:
+class ECIEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize an ECI evaluator to evaluate ECI based on the following guidelines:
     Detects whether ECI is present without a disclaimer in the AI system’s response.
@@ -53,7 +21,7 @@ class ECIEvaluator:
         It contains subscription id, resource group, and project name.
    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
    :return: Whether or not ECI was found in the response without a disclaimer, with AI-generated reasoning
    :rtype: Dict[str, str]
 
@@ -79,21 +47,16 @@ class ECIEvaluator:
     }
     """
 
-    def __init__(self, azure_ai_project: dict, credential=None) -> None:
-        self._async_evaluator = _AsyncECIEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates ECI content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The ECI result.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=_InternalEvaluationMetrics.ECI,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )
@@ -6,7 +6,8 @@ from collections import Counter
 from typing import List
 
 from promptflow._utils.async_utils import async_run_allowing_running_loop
-from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
+
+from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
 
 
 class _AsyncF1ScoreEvaluator:
@@ -3,80 +3,13 @@
 # ---------------------------------------------------------
 
 import os
-import re
-from typing import Union
+from typing import Optional
+from typing_extensions import override
 
-import numpy as np
+from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
 
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
-from promptflow.core import AsyncPrompty
 
-from ..._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration
-from ..._common.utils import (
-    check_and_add_api_version_for_aoai_model_config,
-    check_and_add_user_agent_for_aoai_model_config,
-)
-
-try:
-    from ..._user_agent import USER_AGENT
-except ImportError:
-    USER_AGENT = None
-
-
-class _AsyncFluencyEvaluator:
-    # Constants must be defined within eval's directory to be save/loadable
-    PROMPTY_FILE = "fluency.prompty"
-    LLM_CALL_TIMEOUT = 600
-    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
-
-    def __init__(self, model_config: dict):
-        check_and_add_api_version_for_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
-
-        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
-
-        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
-        # https://github.com/encode/httpx/discussions/2959
-        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
-
-        check_and_add_user_agent_for_aoai_model_config(
-            model_config,
-            prompty_model_config,
-            USER_AGENT,
-        )
-
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
-        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        # Validate input parameters
-        query = str(query or "")
-        response = str(response or "")
-
-        if not (query.strip() and response.strip()):
-            msg = "Both 'query' and 'response' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.F1_EVALUATOR,
-            )
-
-        # Run the evaluation flow
-        llm_output = await self._flow(query=query, response=response, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
-
-        score = np.nan
-        if llm_output:
-            match = re.search(r"\d", llm_output)
-            if match:
-                score = float(match.group())
-
-        return {"gpt_fluency": float(score)}
-
-
-class FluencyEvaluator:
+class FluencyEvaluator(PromptyEvaluatorBase):
     """
     Initialize a fluency evaluator configured for a specific Azure OpenAI model.
 
@@ -102,21 +35,38 @@ class FluencyEvaluator:
     }
     """
 
-    def __init__(self, model_config: dict):
-        self._async_evaluator = _AsyncFluencyEvaluator(model_config)
+    PROMPTY_FILE = "fluency.prompty"
+    RESULT_KEY = "gpt_fluency"
 
-    def __call__(self, *, query: str, response: str, **kwargs):
+    @override
+    def __init__(self, model_config: dict):
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self.RESULT_KEY)
+
+    @override
+    def __call__(
+        self,
+        *,
+        query: Optional[str] = None,
+        response: Optional[str] = None,
+        conversation: Optional[dict] = None,
+        **kwargs
+    ):
         """
-        Evaluate fluency.
+        Evaluate fluency. Accepts either a query and response for a single evaluation,
+        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.
 
         :keyword query: The query to be evaluated.
        :paramtype query: str
        :keyword response: The response to be evaluated.
        :paramtype response: str
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages". Conversation turns are expected
+            to be dictionaries with keys "content" and "role".
+        :paramtype conversation: Optional[Dict]
        :return: The fluency score.
        :rtype: dict
        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+        return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
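
FluencyEvaluator now delegates to PromptyEvaluatorBase and accepts either a single query/response pair or a whole conversation. A hedged usage sketch based on the docstring above; the model-configuration keys mirror the values the removed prompty configuration block (next diff) read from environment variables and are assumptions, not verified against the wheel:

    from azure.ai.evaluation import FluencyEvaluator

    # Assumed Azure OpenAI model configuration; placeholders stand in for real values.
    model_config = {
        "azure_endpoint": "<azure-openai-endpoint>",
        "azure_deployment": "<deployment-name>",
        "api_key": "<api-key>",
    }

    fluency = FluencyEvaluator(model_config=model_config)

    # Single-turn evaluation, as before.
    single = fluency(query="What is the capital of France?", response="Paris is the capital of France.")

    # Multi-turn evaluation via the new `conversation` keyword: per the docstring, turns live
    # under "messages" as dicts with "role" and "content", and per-turn results are aggregated.
    conversation = {
        "messages": [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "Paris is the capital of France."},
        ]
    }
    multi = fluency(conversation=conversation)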
@@ -3,11 +3,6 @@ name: Fluency
 description: Evaluates fluency score for QA scenario
 model:
   api: chat
-  configuration:
-    type: azure_openai
-    azure_deployment: ${env:AZURE_DEPLOYMENT}
-    api_key: ${env:AZURE_OPENAI_API_KEY}
-    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
   parameters:
     temperature: 0.0
     max_tokens: 1
@@ -2,8 +2,8 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
 from nltk.translate.gleu_score import sentence_gleu
-
 from promptflow._utils.async_utils import async_run_allowing_running_loop
+
 from azure.ai.evaluation._common.utils import nltk_tokenize
 