azure-ai-evaluation 1.0.0b2__py3-none-any.whl → 1.0.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of azure-ai-evaluation might be problematic.

Files changed (43)
  1. azure/ai/evaluation/__init__.py +9 -5
  2. azure/ai/evaluation/_common/utils.py +24 -9
  3. azure/ai/evaluation/_constants.py +4 -0
  4. azure/ai/evaluation/_evaluate/_evaluate.py +57 -39
  5. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +34 -81
  6. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +0 -5
  7. azure/ai/evaluation/_evaluators/_common/__init__.py +13 -0
  8. azure/ai/evaluation/_evaluators/_common/_base_eval.py +302 -0
  9. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +79 -0
  10. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +99 -0
  11. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -4
  12. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +18 -41
  13. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +18 -39
  14. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +18 -39
  15. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +18 -39
  16. azure/ai/evaluation/_evaluators/_eci/_eci.py +18 -55
  17. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +29 -74
  18. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +0 -5
  19. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +33 -80
  20. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -5
  21. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +18 -65
  22. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +34 -83
  23. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +0 -5
  24. azure/ai/evaluation/_evaluators/{_chat → _retrieval}/__init__.py +2 -2
  25. azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/_retrieval.py +16 -22
  26. azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/retrieval.prompty +0 -5
  27. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +3 -11
  28. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -5
  29. azure/ai/evaluation/_evaluators/_xpia/xpia.py +16 -90
  30. azure/ai/evaluation/_exceptions.py +0 -1
  31. azure/ai/evaluation/_model_configurations.py +36 -8
  32. azure/ai/evaluation/_version.py +1 -1
  33. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +2 -1
  34. azure/ai/evaluation/simulator/_simulator.py +19 -8
  35. {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/METADATA +59 -1
  36. {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/RECORD +38 -39
  37. azure/ai/evaluation/_evaluators/_chat/_chat.py +0 -357
  38. azure/ai/evaluation/_evaluators/_chat/retrieval/__init__.py +0 -9
  39. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +0 -65
  40. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +0 -5
  41. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +0 -104
  42. {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/WHEEL +0 -0
  43. {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.0.0b3.dist-info}/top_level.txt +0 -0

azure/ai/evaluation/_evaluators/_content_safety/_sexual.py

@@ -1,29 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase

-try:
-    from ._content_safety_base import ContentSafetyEvaluatorBase
-except ImportError:
-    from _content_safety_base import ContentSafetyEvaluatorBase
-
-
-class _AsyncSexualEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, azure_ai_project: dict, credential=None):
-        super().__init__(
-            metric=EvaluationMetrics.SEXUAL,
-            azure_ai_project=azure_ai_project,
-            credential=credential,
-        )
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        return await super().__call__(query=query, response=response, **kwargs)

-
-class SexualEvaluator:
+class SexualEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a sexual evaluator for sexual score.

@@ -31,7 +15,7 @@ class SexualEvaluator:
         It contains subscription id, resource group, and project name.
     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
     :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]

     **Usage**

@@ -56,21 +40,16 @@ class SexualEvaluator:
         }
     """

-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._async_evaluator = _AsyncSexualEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates sexual content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The sexual score.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.SEXUAL,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )
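
The same refactor applies to the violence, ECI, and protected-material diffs below: the hand-written `_Async*` wrappers are removed, the public class derives from the shared `RaiServiceEvaluatorBase`, and the constructor gains an `eval_last_turn` flag. A minimal usage sketch, assuming `SexualEvaluator` is still exported from the package root and that the inherited `__call__` keeps the same `query`/`response` keywords the removed per-class `__call__` exposed; the project-scope keys shown are illustrative:

    from azure.identity import DefaultAzureCredential
    from azure.ai.evaluation import SexualEvaluator

    # Illustrative project scope; the docstring only says it holds the
    # subscription id, resource group, and project name.
    azure_ai_project = {
        "subscription_id": "<subscription-id>",
        "resource_group_name": "<resource-group>",
        "project_name": "<project-name>",
    }

    sexual_eval = SexualEvaluator(
        azure_ai_project,
        credential=DefaultAzureCredential(),
        eval_last_turn=False,  # new parameter in 1.0.0b3 per this diff
    )

    # Assumed call shape: same query/response keywords as the removed __call__.
    result = sexual_eval(query="Tell me about Paris.", response="Paris is the capital of France.")
    print(result)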

azure/ai/evaluation/_evaluators/_content_safety/_violence.py

@@ -1,29 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase

-try:
-    from ._content_safety_base import ContentSafetyEvaluatorBase
-except ImportError:
-    from _content_safety_base import ContentSafetyEvaluatorBase
-
-
-class _AsyncViolenceEvaluator(ContentSafetyEvaluatorBase):
-    def __init__(self, azure_ai_project: dict, credential=None):
-        super().__init__(
-            metric=EvaluationMetrics.VIOLENCE,
-            azure_ai_project=azure_ai_project,
-            credential=credential,
-        )
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        return await super().__call__(query=query, response=response, **kwargs)

-
-class ViolenceEvaluator:
+class ViolenceEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a violence evaluator for violence score.

@@ -31,7 +15,7 @@ class ViolenceEvaluator:
         It contains subscription id, resource group, and project name.
     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
     :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]

     **Usage**

@@ -56,21 +40,16 @@ class ViolenceEvaluator:
         }
     """

-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._async_evaluator = _AsyncViolenceEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates violence content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The violence score.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.VIOLENCE,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )

azure/ai/evaluation/_evaluators/_eci/_eci.py

@@ -1,45 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import _InternalEvaluationMetrics
-from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
-
-
-class _AsyncECIEvaluator:
-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._azure_ai_project = azure_ai_project
-        self._credential = credential
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        # Validate inputs
-        # Raises value error if failed, so execution alone signifies success.
-        if not (query and query.strip() and query != "None") or not (
-            response and response.strip() and response != "None"
-        ):
-            msg = "Both 'query' and 'response' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.ECI_EVALUATOR,
-            )
-
-        # Run score computation based on supplied metric.
-        result = await evaluate_with_rai_service(
-            metric_name=_InternalEvaluationMetrics.ECI,
-            query=query,
-            response=response,
-            project_scope=self._azure_ai_project,
-            credential=self._credential,
-        )
-        return result
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase


-class ECIEvaluator:
+class ECIEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize an ECI evaluator to evaluate ECI based on the following guidelines:
     Detects whether ECI is present without a disclaimer in the AI system’s response.
@@ -53,7 +21,7 @@ class ECIEvaluator:
         It contains subscription id, resource group, and project name.
     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
     :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
     :return: Whether or not ECI was found in the response without a disclaimer, with AI-generated reasoning
     :rtype: Dict[str, str]

@@ -79,21 +47,16 @@ class ECIEvaluator:
         }
     """

-    def __init__(self, azure_ai_project: dict, credential=None) -> None:
-        self._async_evaluator = _AsyncECIEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates ECI content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The ECI result.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=_InternalEvaluationMetrics.ECI,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )

azure/ai/evaluation/_evaluators/_fluency/_fluency.py

@@ -3,75 +3,13 @@
 # ---------------------------------------------------------

 import os
-import re
+from typing import Optional
+from typing_extensions import override

-import numpy as np
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-from promptflow.core import AsyncPrompty
+from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase

-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException

-from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config
-
-try:
-    from ..._user_agent import USER_AGENT
-except ImportError:
-    USER_AGENT = None
-
-
-class _AsyncFluencyEvaluator:
-    # Constants must be defined within eval's directory to be save/loadable
-    PROMPTY_FILE = "fluency.prompty"
-    LLM_CALL_TIMEOUT = 600
-    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
-
-    def __init__(self, model_config: dict):
-        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
-
-        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
-
-        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
-        # https://github.com/encode/httpx/discussions/2959
-        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
-
-        ensure_user_agent_in_aoai_model_config(
-            model_config,
-            prompty_model_config,
-            USER_AGENT,
-        )
-
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
-        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)
-
-    async def __call__(self, *, query: str, response: str, **kwargs):
-        # Validate input parameters
-        query = str(query or "")
-        response = str(response or "")
-
-        if not (query.strip() and response.strip()):
-            msg = "Both 'query' and 'response' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.F1_EVALUATOR,
-            )
-
-        # Run the evaluation flow
-        llm_output = await self._flow(query=query, response=response, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
-
-        score = np.nan
-        if llm_output:
-            match = re.search(r"\d", llm_output)
-            if match:
-                score = float(match.group())
-
-        return {"gpt_fluency": float(score)}
-
-
-class FluencyEvaluator:
+class FluencyEvaluator(PromptyEvaluatorBase):
     """
     Initialize a fluency evaluator configured for a specific Azure OpenAI model.

@@ -97,21 +35,38 @@ class FluencyEvaluator:
         }
     """

-    def __init__(self, model_config: dict):
-        self._async_evaluator = _AsyncFluencyEvaluator(model_config)
+    PROMPTY_FILE = "fluency.prompty"
+    RESULT_KEY = "gpt_fluency"

-    def __call__(self, *, query: str, response: str, **kwargs):
+    @override
+    def __init__(self, model_config: dict):
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self.RESULT_KEY)
+
+    @override
+    def __call__(
+        self,
+        *,
+        query: Optional[str] = None,
+        response: Optional[str] = None,
+        conversation: Optional[dict] = None,
+        **kwargs
+    ):
         """
-        Evaluate fluency.
+        Evaluate fluency. Accepts either a query and response for a single evaluation,
+        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.

         :keyword query: The query to be evaluated.
         :paramtype query: str
         :keyword response: The response to be evaluated.
         :paramtype response: str
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages". Conversation turns are expected
+            to be dictionaries with keys "content" and "role".
+        :paramtype conversation: Optional[Dict]
         :return: The fluency score.
         :rtype: dict
         """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+        return super().__call__(query=query, response=response, conversation=conversation, **kwargs)
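
The prompty-backed quality evaluators now subclass `PromptyEvaluatorBase`, which takes over the prompty loading and model configuration that the removed `_AsyncFluencyEvaluator` handled, and `__call__` accepts either a single query/response pair or a whole conversation. A sketch of both call styles, with placeholder Azure OpenAI settings (the key names are an assumption based on the package's AzureOpenAIModelConfiguration in _model_configurations.py, which is not shown in this excerpt):

    from azure.ai.evaluation import FluencyEvaluator

    # Placeholder model configuration; assumed AzureOpenAIModelConfiguration shape.
    model_config = {
        "azure_endpoint": "https://<your-resource>.openai.azure.com",
        "api_key": "<api-key>",
        "azure_deployment": "<deployment-name>",
    }

    fluency_eval = FluencyEvaluator(model_config)

    # Single evaluation, unchanged from 1.0.0b2.
    print(fluency_eval(query="What is the capital of France?", response="Paris."))

    # Conversation mode, new in 1.0.0b3: turns live under "messages" and
    # per-turn results are aggregated, per the new docstring.
    conversation = {
        "messages": [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "Paris."},
        ]
    }
    print(fluency_eval(conversation=conversation))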

azure/ai/evaluation/_evaluators/_fluency/fluency.prompty

@@ -3,11 +3,6 @@ name: Fluency
 description: Evaluates fluency score for QA scenario
 model:
   api: chat
-  configuration:
-    type: azure_openai
-    azure_deployment: ${env:AZURE_DEPLOYMENT}
-    api_key: ${env:AZURE_OPENAI_API_KEY}
-    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
   parameters:
     temperature: 0.0
     max_tokens: 1

azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py

@@ -1,77 +1,14 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-
 import os
-import re
-
-import numpy as np
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-from promptflow.core import AsyncPrompty
-
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
-
-from ..._common.utils import ensure_api_version_in_aoai_model_config, ensure_user_agent_in_aoai_model_config
+from typing import Optional
+from typing_extensions import override

-try:
-    from ..._user_agent import USER_AGENT
-except ImportError:
-    USER_AGENT = None
-
-
-class _AsyncGroundednessEvaluator:
-    # Constants must be defined within eval's directory to be save/loadable
-    PROMPTY_FILE = "groundedness.prompty"
-    LLM_CALL_TIMEOUT = 600
-    DEFAULT_OPEN_API_VERSION = "2024-02-15-preview"
+from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase

-    def __init__(self, model_config: dict):
-        ensure_api_version_in_aoai_model_config(model_config, self.DEFAULT_OPEN_API_VERSION)
-
-        prompty_model_config = {"configuration": model_config, "parameters": {"extra_headers": {}}}
-
-        # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
-        # https://github.com/encode/httpx/discussions/2959
-        prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})
-
-        ensure_user_agent_in_aoai_model_config(
-            model_config,
-            prompty_model_config,
-            USER_AGENT,
-        )
-
-        current_dir = os.path.dirname(__file__)
-        prompty_path = os.path.join(current_dir, "groundedness.prompty")
-        self._flow = AsyncPrompty.load(source=prompty_path, model=prompty_model_config)

-    async def __call__(self, *, response: str, context: str, **kwargs):
-        # Validate input parameters
-        response = str(response or "")
-        context = str(context or "")
-
-        if not response.strip() or not context.strip():
-            msg = "Both 'response' and 'context' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.F1_EVALUATOR,
-            )
-
-        # Run the evaluation flow
-        llm_output = await self._flow(response=response, context=context, timeout=self.LLM_CALL_TIMEOUT, **kwargs)
-
-        score = np.nan
-        if llm_output:
-            match = re.search(r"\d", llm_output)
-            if match:
-                score = float(match.group())
-
-        return {"gpt_groundedness": float(score)}
-
-
-class GroundednessEvaluator:
+class GroundednessEvaluator(PromptyEvaluatorBase):
     """
     Initialize a groundedness evaluator configured for a specific Azure OpenAI model.

@@ -98,21 +35,37 @@ class GroundednessEvaluator:
         }
     """

-    def __init__(self, model_config: dict):
-        self._async_evaluator = _AsyncGroundednessEvaluator(model_config)
+    PROMPTY_FILE = "groundedness.prompty"
+    RESULT_KEY = "gpt_groundedness"

-    def __call__(self, *, response: str, context: str, **kwargs):
-        """
-        Evaluate groundedness of the response in the context.
+    @override
+    def __init__(self, model_config: dict):
+        current_dir = os.path.dirname(__file__)
+        prompty_path = os.path.join(current_dir, self.PROMPTY_FILE)
+        super().__init__(model_config=model_config, prompty_file=prompty_path, result_key=self.RESULT_KEY)
+
+    @override
+    def __call__(
+        self,
+        *,
+        response: Optional[str] = None,
+        context: Optional[str] = None,
+        conversation: Optional[dict] = None,
+        **kwargs
+    ):
+        """Evaluate groundedless. Accepts either a response and context a single evaluation,
+        or a conversation for a multi-turn evaluation. If the conversation has more than one turn,
+        the evaluator will aggregate the results of each turn.

         :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :keyword context: The context in which the response is evaluated.
-        :paramtype context: str
-        :return: The groundedness score.
+        :paramtype response: Optional[str]
+        :keyword context: The context to be evaluated.
+        :paramtype context: Optional[str]
+        :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
+            key "messages", and potentially a global context under the key "context". Conversation turns are expected
+            to be dictionaries with keys "content", "role", and possibly "context".
+        :paramtype conversation: Optional[Dict]
+        :return: The relevance score.
         :rtype: dict
         """
-        return async_run_allowing_running_loop(self._async_evaluator, response=response, context=context, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+        return super().__call__(response=response, context=context, conversation=conversation, **kwargs)
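
Groundedness follows the same pattern but, per its new docstring, a conversation can also carry context: a global value under the top-level "context" key and, possibly, per-turn "context" entries. A sketch of what that payload implies (shapes assumed from the docstring only; the model_config placeholder matches the fluency sketch above):

    from azure.ai.evaluation import GroundednessEvaluator

    model_config = {  # same placeholder shape as the fluency sketch above
        "azure_endpoint": "https://<your-resource>.openai.azure.com",
        "api_key": "<api-key>",
        "azure_deployment": "<deployment-name>",
    }

    groundedness_eval = GroundednessEvaluator(model_config)

    # Single evaluation: a response plus the context it should be grounded in.
    print(groundedness_eval(
        response="Paris is the capital of France.",
        context="According to the atlas, the capital of France is Paris.",
    ))

    # Conversation mode: global context plus optional per-turn context,
    # as described by the new :keyword conversation: docstring.
    conversation = {
        "context": "Shared background text for every turn.",
        "messages": [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "Paris.", "context": "The atlas says Paris is the capital."},
        ],
    }
    print(groundedness_eval(conversation=conversation))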

azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty

@@ -3,11 +3,6 @@ name: Groundedness
 description: Evaluates groundedness score for QA scenario
 model:
   api: chat
-  configuration:
-    type: azure_openai
-    azure_deployment: ${env:AZURE_DEPLOYMENT}
-    api_key: ${env:AZURE_OPENAI_API_KEY}
-    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
   parameters:
     temperature: 0.0
     max_tokens: 1

azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py

@@ -1,55 +1,13 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-
+from typing import Optional
+from typing_extensions import override
 from azure.ai.evaluation._common.constants import EvaluationMetrics
-from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
-
-
-class _AsyncProtectedMaterialEvaluator:
-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._azure_ai_project = azure_ai_project
-        self._credential = credential
+from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase

-    async def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates content according to this evaluator's metric.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: The evaluation score computation based on the Content Safety metric (self.metric).
-        :rtype: Any
-        """
-        # Validate inputs
-        # Raises value error if failed, so execution alone signifies success.
-        if not (query and query.strip() and query != "None") or not (
-            response and response.strip() and response != "None"
-        ):
-            msg = "Both 'query' and 'response' must be non-empty strings."
-            raise EvaluationException(
-                message=msg,
-                internal_message=msg,
-                error_category=ErrorCategory.MISSING_FIELD,
-                error_blame=ErrorBlame.USER_ERROR,
-                error_target=ErrorTarget.PROTECTED_MATERIAL_EVALUATOR,
-            )
-
-        # Run score computation based on supplied metric.
-        result = await evaluate_with_rai_service(
-            metric_name=EvaluationMetrics.PROTECTED_MATERIAL,
-            query=query,
-            response=response,
-            project_scope=self._azure_ai_project,
-            credential=self._credential,
-        )
-        return result

-
-class ProtectedMaterialEvaluator:
+class ProtectedMaterialEvaluator(RaiServiceEvaluatorBase):
     """
     Initialize a protected material evaluator to detect whether protected material
     is present in your AI system's response. Outputs True or False with AI-generated reasoning.
@@ -58,7 +16,7 @@ class ProtectedMaterialEvaluator:
         It contains subscription id, resource group, and project name.
     :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param credential: The credential for connecting to Azure AI project.
-    :type credential: ~azure.core.credentials.TokenCredential
+    :type credential: Optional[~azure.core.credentials.TokenCredential]
     :return: Whether or not protected material was found in the response, with AI-generated reasoning.
     :rtype: Dict[str, str]

@@ -84,21 +42,16 @@ class ProtectedMaterialEvaluator:
         }
     """

-    def __init__(self, azure_ai_project: dict, credential=None):
-        self._async_evaluator = _AsyncProtectedMaterialEvaluator(azure_ai_project, credential)
-
-    def __call__(self, *, query: str, response: str, **kwargs):
-        """
-        Evaluates protected material content.
-
-        :keyword query: The query to be evaluated.
-        :paramtype query: str
-        :keyword response: The response to be evaluated.
-        :paramtype response: str
-        :return: A dictionary containing a boolean label and reasoning.
-        :rtype: dict
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
+    @override
+    def __init__(
+        self,
+        azure_ai_project: dict,
+        credential: Optional[dict] = None,
+        eval_last_turn: bool = False,
+    ):
+        super().__init__(
+            eval_metric=EvaluationMetrics.PROTECTED_MATERIAL,
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            eval_last_turn=eval_last_turn,
+        )