azure-ai-evaluation 1.0.0__py3-none-any.whl → 1.0.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of azure-ai-evaluation might be problematic.
- azure/ai/evaluation/__init__.py +5 -31
- azure/ai/evaluation/_common/constants.py +2 -9
- azure/ai/evaluation/_common/rai_service.py +120 -300
- azure/ai/evaluation/_common/utils.py +23 -381
- azure/ai/evaluation/_constants.py +6 -19
- azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/__init__.py +2 -3
- azure/ai/evaluation/_evaluate/{_batch_run/eval_run_context.py → _batch_run_client/batch_run_context.py} +7 -23
- azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/code_client.py +17 -33
- azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/proxy_client.py +4 -32
- azure/ai/evaluation/_evaluate/_eval_run.py +24 -81
- azure/ai/evaluation/_evaluate/_evaluate.py +239 -393
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +17 -17
- azure/ai/evaluation/_evaluate/_utils.py +28 -82
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +18 -17
- azure/ai/evaluation/_evaluators/{_retrieval → _chat}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_chat/_chat.py +357 -0
- azure/ai/evaluation/_evaluators/{_service_groundedness → _chat/retrieval}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +157 -0
- azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +48 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +88 -78
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +39 -76
- azure/ai/evaluation/_evaluators/_content_safety/__init__.py +4 -0
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +67 -105
- azure/ai/evaluation/_evaluators/{_multimodal/_content_safety_multimodal_base.py → _content_safety/_content_safety_base.py} +34 -24
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +301 -0
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +54 -105
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +52 -99
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +52 -101
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +51 -101
- azure/ai/evaluation/_evaluators/_eci/_eci.py +54 -44
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +19 -34
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +89 -76
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +41 -66
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +16 -14
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +87 -113
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +54 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +27 -20
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +80 -89
- azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +104 -0
- azure/ai/evaluation/_evaluators/_qa/_qa.py +30 -23
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +96 -84
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +47 -78
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +27 -26
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +38 -53
- azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +5 -0
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +105 -91
- azure/ai/evaluation/_exceptions.py +7 -28
- azure/ai/evaluation/_http_utils.py +132 -203
- azure/ai/evaluation/_model_configurations.py +8 -104
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/simulator/__init__.py +1 -2
- azure/ai/evaluation/simulator/_adversarial_scenario.py +1 -20
- azure/ai/evaluation/simulator/_adversarial_simulator.py +92 -111
- azure/ai/evaluation/simulator/_constants.py +1 -11
- azure/ai/evaluation/simulator/_conversation/__init__.py +12 -13
- azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -4
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +67 -33
- azure/ai/evaluation/simulator/_helpers/__init__.py +2 -1
- azure/ai/evaluation/{_common → simulator/_helpers}/_experimental.py +9 -24
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +5 -26
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +94 -107
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +22 -70
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +11 -28
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +4 -8
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +24 -68
- azure/ai/evaluation/simulator/_model_tools/models.py +10 -10
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +10 -6
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +5 -6
- azure/ai/evaluation/simulator/_simulator.py +207 -277
- azure/ai/evaluation/simulator/_tracing.py +4 -4
- azure/ai/evaluation/simulator/_utils.py +13 -31
- azure_ai_evaluation-1.0.0b2.dist-info/METADATA +449 -0
- azure_ai_evaluation-1.0.0b2.dist-info/RECORD +99 -0
- {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/WHEEL +1 -1
- azure/ai/evaluation/_common/math.py +0 -89
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +0 -46
- azure/ai/evaluation/_evaluators/_common/__init__.py +0 -13
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +0 -344
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +0 -88
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +0 -133
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +0 -113
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +0 -99
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +0 -20
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +0 -132
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +0 -124
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +0 -100
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +0 -112
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +0 -93
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +0 -148
- azure/ai/evaluation/_vendor/__init__.py +0 -3
- azure/ai/evaluation/_vendor/rouge_score/__init__.py +0 -14
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -328
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -63
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -63
- azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +0 -53
- azure/ai/evaluation/simulator/_data_sources/__init__.py +0 -3
- azure/ai/evaluation/simulator/_data_sources/grounding.json +0 -1150
- azure_ai_evaluation-1.0.0.dist-info/METADATA +0 -595
- azure_ai_evaluation-1.0.0.dist-info/NOTICE.txt +0 -70
- azure_ai_evaluation-1.0.0.dist-info/RECORD +0 -119
- {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_common/utils.py

@@ -2,30 +2,20 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------

-import re
-import math
 import threading
-from typing import
+from typing import List, Optional, Union

 import nltk
-
-
-from azure.ai.evaluation.
-from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
-from azure.ai.evaluation._model_configurations import (
-    AzureAIProject,
-    AzureOpenAIModelConfiguration,
-    OpenAIModelConfiguration,
-)
+import numpy as np
+
+from azure.ai.evaluation._model_configurations import AzureOpenAIModelConfiguration, OpenAIModelConfiguration

 from . import constants

 _nltk_data_download_lock = threading.Lock()

-T_TypedDict = TypeVar("T_TypedDict", bound=Mapping[Any, Any])
-

-def get_harm_severity_level(harm_score: Union[float, int]) -> Union[str, float]:
+def get_harm_severity_level(harm_score: int) -> str:
     """Generate harm severity level based on harm score.

     :param harm_score: The harm score to be evaluated.
@@ -39,29 +29,23 @@ def get_harm_severity_level(harm_score: Union[float, int]) -> Union[str, float]:
         constants.HarmSeverityLevel.Medium: [4, 5],
         constants.HarmSeverityLevel.High: [6, 7],
     }
-    if
-        return
+    if harm_score == np.nan or harm_score is None:
+        return np.nan
     for harm_level, harm_score_range in HARM_SEVERITY_LEVEL_MAPPING.items():
         if harm_score_range[0] <= harm_score <= harm_score_range[1]:
             return harm_level.value
-    return
+    return np.nan


 def ensure_nltk_data_downloaded():
     """Download NLTK data packages if not already downloaded."""
-    nltk_data = [
-        ("wordnet", "corpora/wordnet.zip"),
-        ("perluniprops", "misc/perluniprops.zip"),
-        ("punkt", "tokenizers/punkt.zip"),
-        ("punkt_tab", "tokenizers/punkt_tab.zip"),
-    ]
-
     with _nltk_data_download_lock:
-
-
-
-
-
+        try:
+            from nltk.tokenize.nist import NISTTokenizer  # pylint: disable=unused-import
+        except LookupError:
+            nltk.download("perluniprops")
+            nltk.download("punkt")
+            nltk.download("punkt_tab")


 def nltk_tokenize(text: str) -> List[str]:
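For reference, the severity lookup that both versions implement buckets integer harm scores 0-7 into named levels. The snippet below is a standalone sketch of that lookup, not the SDK's code: the two lower bucket names are assumptions (only Medium and High appear in the context above), and it checks NaN with math.isnan, since an `== nan` comparison is always false in Python.

import math

# Medium/High ranges come from the diff context; the lower bucket names are assumed here.
HARM_SEVERITY_LEVEL_MAPPING = {
    "Very low": (0, 1),
    "Low": (2, 3),
    "Medium": (4, 5),
    "High": (6, 7),
}

def harm_severity_level(harm_score):
    # NaN never compares equal to itself, so use math.isnan rather than `==`.
    if harm_score is None or (isinstance(harm_score, float) and math.isnan(harm_score)):
        return math.nan
    for level, (low, high) in HARM_SEVERITY_LEVEL_MAPPING.items():
        if low <= harm_score <= high:
            return level
    return math.nan

print(harm_severity_level(4))  # Medium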
@@ -86,360 +70,18 @@ def nltk_tokenize(text: str) -> List[str]:
     return list(tokens)


-def
-    return isinstance(val, dict) and all(isinstance(val.get(k), str) for k in ("azure_endpoint", "azure_deployment"))
-
-
-def _is_openai_model_config(val: object) -> TypeGuard[OpenAIModelConfiguration]:
-    return isinstance(val, dict) and all(isinstance(val.get(k), str) for k in ("model"))
-
-
-def parse_model_config_type(
-    model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration],
-) -> None:
-    if _is_aoi_model_config(model_config):
-        model_config["type"] = AZURE_OPENAI_TYPE
-    elif _is_openai_model_config(model_config):
-        model_config["type"] = OPENAI_TYPE
-
-
-def construct_prompty_model_config(
+def ensure_api_version_in_aoai_model_config(
     model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration],
     default_api_version: str,
-
-
-    parse_model_config_type(model_config)
-
-    if _is_aoi_model_config(model_config):
+) -> None:
+    if "azure_endpoint" in model_config or "azure_deployment" in model_config:
         model_config["api_version"] = model_config.get("api_version", default_api_version)

-    prompty_model_config: dict = {"configuration": model_config, "parameters": {"extra_headers": {}}}
-
-    # Handle "RuntimeError: Event loop is closed" from httpx AsyncClient
-    # https://github.com/encode/httpx/discussions/2959
-    prompty_model_config["parameters"]["extra_headers"].update({"Connection": "close"})

-
+def ensure_user_agent_in_aoai_model_config(
+    model_config: Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration],
+    prompty_model_config: dict,
+    user_agent: Optional[str] = None,
+) -> None:
+    if user_agent and ("azure_endpoint" in model_config or "azure_deployment" in model_config):
         prompty_model_config["parameters"]["extra_headers"].update({"x-ms-useragent": user_agent})
-
-    return prompty_model_config
-
-
-def validate_azure_ai_project(o: object) -> AzureAIProject:
-    fields = {"subscription_id": str, "resource_group_name": str, "project_name": str}
-
-    if not isinstance(o, dict):
-        msg = "The 'azure_ai_project' parameter must be a dictionary."
-        raise EvaluationException(
-            message=msg,
-            category=ErrorCategory.INVALID_VALUE,
-            blame=ErrorBlame.USER_ERROR,
-        )
-
-    missing_fields = set(fields.keys()) - o.keys()
-
-    if missing_fields:
-        msg = (
-            "The 'azure_ai_project' dictionary is missing the following required "
-            f"field(s): {', '.join(f'{field}' for field in missing_fields)}."
-        )
-        raise EvaluationException(
-            message=msg,
-            category=ErrorCategory.INVALID_VALUE,
-            blame=ErrorBlame.USER_ERROR,
-        )
-
-    for field_name, expected_type in fields.items():
-        if isinstance(o[field_name], expected_type):
-            continue
-
-        msg = f"Invalid type for field '{field_name}'. Expected {expected_type}, but got {type(o[field_name])}."
-        raise EvaluationException(
-            message=msg,
-            category=ErrorCategory.INVALID_VALUE,
-            blame=ErrorBlame.USER_ERROR,
-        )
-
-    return cast(AzureAIProject, o)
-
-
-def validate_model_config(config: dict) -> Union[AzureOpenAIModelConfiguration, OpenAIModelConfiguration]:
-    try:
-        return _validate_typed_dict(config, AzureOpenAIModelConfiguration)
-    except TypeError:
-        try:
-            return _validate_typed_dict(config, OpenAIModelConfiguration)
-        except TypeError as e:
-            msg = "Model config validation failed."
-            raise EvaluationException(
-                message=msg, internal_message=msg, category=ErrorCategory.MISSING_FIELD, blame=ErrorBlame.USER_ERROR
-            ) from e
-
-
-def _validate_typed_dict(o: object, t: Type[T_TypedDict]) -> T_TypedDict:
-    """Do very basic runtime validation that an object is a typed dict
-
-    .. warning::
-
-        This validation is very basic, robust enough to cover some very simple TypedDicts.
-        Ideally, validation of this kind should be delegated to something more robust.
-
-        You will very quickly run into limitations trying to apply this function more broadly:
-        * Doesn't support stringized annotations at all
-        * Very limited support for generics, and "special form" (NoReturn, NotRequired, Required, etc...) types.
-        * Error messages are poor, especially if there is any nesting.
-
-    :param object o: The object to check
-    :param Type[T_TypedDict] t: The TypedDict to validate against
-    :raises NotImplementedError: Several forms of validation are unsupported
-        * Checking against stringized annotations
-        * Checking a generic that is not one of a few basic forms
-    :raises TypeError: If a value does not match the specified annotation
-    :raises ValueError: If t's annotation is not a string, type of a special form (e.g. NotRequired, Required, etc...)
-    :returns: The object passed in
-    :rtype: T_TypedDict
-    """
-    if not isinstance(o, dict):
-        raise TypeError(f"Expected type 'dict', got type '{type(object)}'.")
-
-    annotations = t.__annotations__
-    is_total = getattr(t, "__total__", False)
-    unknown_keys = set(o.keys()) - annotations.keys()
-
-    if unknown_keys:
-        raise TypeError(f"dict contains unknown keys: {list(unknown_keys)!r}")
-
-    required_keys = {
-        k
-        for k in annotations
-        if (is_total and get_origin(annotations[k]) is not NotRequired)
-        or (not is_total and get_origin(annotations[k]) is Required)
-    }
-
-    missing_keys = required_keys - o.keys()
-
-    if missing_keys:
-        raise TypeError(f"Missing required keys: {list(missing_keys)!r}.")
-
-    def validate_annotation(v: object, annotation: Union[str, type, object]) -> bool:
-        if isinstance(annotation, str):
-            raise NotImplementedError("Missing support for validating against stringized annotations.")
-
-        if (origin := get_origin(annotation)) is not None:
-            if origin is tuple:
-                validate_annotation(v, tuple)
-                tuple_args = get_args(annotation)
-                if len(cast(tuple, v)) != len(tuple_args):
-                    raise TypeError(f"Expected a {len(tuple_args)}-tuple, got a {len(cast(tuple, v))}-tuple.")
-                for tuple_val, tuple_args in zip(cast(tuple, v), tuple_args):
-                    validate_annotation(tuple_val, tuple_args)
-            elif origin is dict:
-                validate_annotation(v, dict)
-                dict_key_ann, dict_val_ann = get_args(annotation)
-                for dict_key, dict_val in cast(dict, v).items():
-                    validate_annotation(dict_val, dict_val_ann)
-                    validate_annotation(dict_key, dict_key_ann)
-            elif origin is list:
-                validate_annotation(v, list)
-                list_val_ann = get_args(annotation)[0]
-                for list_val in cast(list, v):
-                    validate_annotation(list_val, list_val_ann)
-            elif origin is Union:
-                for generic_arg in get_args(annotation):
-                    try:
-                        validate_annotation(v, generic_arg)
-                        return True
-                    except TypeError:
-                        pass
-                raise TypeError(f"Expected value to have type {annotation}. Received type {type(v)}")
-            elif origin is Literal:
-                literal_args = get_args(annotation)
-                if not any(type(literal) is type(v) and literal == v for literal in literal_args):
-                    raise TypeError(f"Expected value to be one of {list(literal_args)!r}. Received type {type(v)}")
-            elif any(origin is g for g in (NotRequired, Required)):
-                validate_annotation(v, get_args(annotation)[0])
-            else:
-                raise NotImplementedError(f"Validation not implemented for generic {origin}.")
-            return True
-
-        if isinstance(annotation, type):
-            if not isinstance(v, annotation):
-                raise TypeError(f"Expected value to have type {annotation}. Received type {type(v)}.")
-            return True
-
-        raise ValueError("Annotation to validate against should be a str, type, or generic.")
-
-    for k, v in o.items():
-        validate_annotation(v, annotations[k])
-
-    return cast(T_TypedDict, o)
-
-
-def parse_quality_evaluator_reason_score(llm_output: str) -> Tuple[float, str]:
-    """Parse the output of prompt-based quality evaluators that return a score and reason.
-
-    Current supported evaluators:
-        - Fluency
-        - Relevance
-        - Retrieval
-        - Groundedness
-        - Coherence
-
-    :param llm_output: The output of the prompt-based quality evaluator.
-    :type llm_output: str
-    :return: The score and reason.
-    :rtype: Tuple[float, str]
-    """
-    score = math.nan
-    reason = ""
-    if llm_output:
-        try:
-            score_pattern = r"<S2>\D*?([1-5]).*?</S2>"
-            reason_pattern = r"<S1>(.*?)</S1>"
-            score_match = re.findall(score_pattern, llm_output, re.DOTALL)
-            reason_match = re.findall(reason_pattern, llm_output, re.DOTALL)
-            if score_match:
-                score = float(score_match[0].strip())
-            if reason_match:
-                reason = reason_match[0].strip()
-        except ValueError as exc:
-            raise EvaluationException(
-                message=f"Failed to parse model output: \n{llm_output}",
-                internal_message="Failed to parse model output.",
-                category=ErrorCategory.FAILED_EXECUTION,
-                blame=ErrorBlame.SYSTEM_ERROR,
-            ) from exc
-
-    return score, reason
-
-
-def remove_optional_singletons(eval_class, singletons):
-    required_singletons = singletons.copy()
-    if hasattr(eval_class, "_OPTIONAL_PARAMS"):  # pylint: disable=protected-access
-        for param in eval_class._OPTIONAL_PARAMS:  # pylint: disable=protected-access
-            if param in singletons:
-                del required_singletons[param]
-    return required_singletons
-
-
-def retrieve_content_type(assistant_messages: List, metric: str) -> str:
-    """Get the content type for service payload.
-
-    :param assistant_messages: The list of messages to be annotated by evaluation service
-    :type assistant_messages: list
-    :param metric: A string representing the metric type
-    :type metric: str
-    :return: A text representing the content type. Example: 'text', or 'image'
-    :rtype: str
-    """
-    # Check if metric is "protected_material"
-    if metric == "protected_material":
-        return "image"
-
-    # Iterate through each message
-    for message in assistant_messages:
-        # Ensure "content" exists in the message and is iterable
-        if isinstance(message.get("content", []), list):
-            for content in message.get("content", []):
-                if content.get("type") == "image_url":
-                    return "image"
-    # Default return if no image was found
-    return "text"
-
-
-def validate_conversation(conversation):
-    def raise_exception(msg, target):
-        raise EvaluationException(
-            message=msg,
-            internal_message=msg,
-            target=target,
-            category=ErrorCategory.INVALID_VALUE,
-            blame=ErrorBlame.USER_ERROR,
-        )
-
-    if not conversation or "messages" not in conversation:
-        raise_exception(
-            "Attribute 'messages' is missing in the request",
-            ErrorTarget.CONTENT_SAFETY_CHAT_EVALUATOR,
-        )
-    messages = conversation["messages"]
-    if not isinstance(messages, list):
-        raise_exception(
-            "'messages' parameter must be a JSON-compatible list of chat messages",
-            ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-        )
-    expected_roles = {"user", "assistant", "system"}
-    image_found = False
-    assistant_message_count = 0
-    user_message_count = 0
-    for num, message in enumerate(messages, 1):
-        if not isinstance(message, dict):
-            try:
-                from azure.ai.inference.models import (
-                    ChatRequestMessage,
-                    UserMessage,
-                    AssistantMessage,
-                    SystemMessage,
-                    ImageContentItem,
-                )
-            except ImportError as ex:
-                raise MissingRequiredPackage(
-                    message="Please install 'azure-ai-inference' package to use SystemMessage, "
-                    "UserMessage or AssistantMessage."
-                ) from ex
-
-            if isinstance(message, ChatRequestMessage) and not isinstance(
-                message, (UserMessage, AssistantMessage, SystemMessage)
-            ):
-                raise_exception(
-                    f"Messages must be a strongly typed class of ChatRequestMessage. Message number: {num}",
-                    ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-                )
-            if isinstance(message, AssistantMessage):
-                assistant_message_count += 1
-            if isinstance(message, UserMessage):
-                user_message_count += 1
-                if isinstance(message.content, list) and any(
-                    isinstance(item, ImageContentItem) for item in message.content
-                ):
-                    image_found = True
-            continue
-        if message.get("role") not in expected_roles:
-            raise_exception(
-                f"Invalid role provided: {message.get('role')}. Message number: {num}",
-                ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-            )
-        if message.get("role") == "assistant":
-            assistant_message_count += 1
-        if message.get("role") == "user":
-            user_message_count += 1
-        content = message.get("content")
-        if not isinstance(content, (str, list)):
-            raise_exception(
-                f"Content in each turn must be a string or array. Message number: {num}",
-                ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-            )
-        if isinstance(content, list):
-            if any(item.get("type") == "image_url" and "url" in item.get("image_url", {}) for item in content):
-                image_found = True
-    if not image_found:
-        raise_exception(
-            "Message needs to have multi-modal input like images.",
-            ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-        )
-    if assistant_message_count == 0:
-        raise_exception(
-            "Assistant role required in one of the messages.",
-            ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-        )
-    if user_message_count == 0:
-        raise_exception(
-            "User role required in one of the messages.",
-            ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-        )
-    if assistant_message_count > 1:
-        raise_exception(
-            "Evaluators for multimodal conversations only support single turn. "
-            "User and assistant role expected as the only role in each message.",
-            ErrorTarget.CONTENT_SAFETY_MULTIMODAL_EVALUATOR,
-        )
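The parse_quality_evaluator_reason_score helper removed above pulls the free-text reason from <S1>...</S1> tags and the 1-5 score from <S2>...</S2> tags in the model output. Below is a standalone sketch of that parsing, reusing the same regular expressions; it is an illustration, not the packaged function itself.

import math
import re

def parse_reason_score(llm_output: str):
    # Reason lives in <S1>...</S1>, the integer score 1-5 in <S2>...</S2>.
    score, reason = math.nan, ""
    if llm_output:
        score_match = re.findall(r"<S2>\D*?([1-5]).*?</S2>", llm_output, re.DOTALL)
        reason_match = re.findall(r"<S1>(.*?)</S1>", llm_output, re.DOTALL)
        if score_match:
            score = float(score_match[0].strip())
        if reason_match:
            reason = reason_match[0].strip()
    return score, reason

print(parse_reason_score("<S1>Answer is fully grounded.</S1><S2>5</S2>"))  # (5.0, 'Answer is fully grounded.')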
azure/ai/evaluation/_constants.py

@@ -1,19 +1,18 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from typing import Literal


 class EvaluationMetrics:
     """Metrics for model evaluation."""

-
-
-
-
-
+    GPT_GROUNDEDNESS = "gpt_groundedness"
+    GPT_RELEVANCE = "gpt_relevance"
+    GPT_COHERENCE = "gpt_coherence"
+    GPT_FLUENCY = "gpt_fluency"
+    GPT_SIMILARITY = "gpt_similarity"
     F1_SCORE = "f1_score"
-
+    GPT_RETRIEVAL_SCORE = "gpt_retrieval_score"
     HATE_FAIRNESS = "hate_fairness"
     HATE_UNFAIRNESS = "hate_unfairness"
     VIOLENCE = "violence"

@@ -49,24 +48,12 @@ class DefaultOpenEncoding:
     """SDK Default Encoding when writing a file"""


-class EvaluationRunProperties:
-    """Defines properties used to identify an evaluation run by UI"""
-
-    RUN_TYPE = "runType"
-    EVALUATION_RUN = "_azureml.evaluation_run"
-
-
 DEFAULT_EVALUATION_RESULTS_FILE_NAME = "evaluation_results.json"

 CONTENT_SAFETY_DEFECT_RATE_THRESHOLD_DEFAULT = 4

 PF_BATCH_TIMEOUT_SEC_DEFAULT = 3600
 PF_BATCH_TIMEOUT_SEC = "PF_BATCH_TIMEOUT_SEC"
-PF_DISABLE_TRACING = "PF_DISABLE_TRACING"

 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"
 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT_DEFAULT = 60
-
-AZURE_OPENAI_TYPE: Literal["azure_openai"] = "azure_openai"
-
-OPENAI_TYPE: Literal["openai"] = "openai"
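In 1.0.0b2 the GPT quality metrics carry a gpt_ prefix (gpt_groundedness, gpt_relevance, and so on); the corresponding 1.0.0 entries are not visible in this hunk. A small hypothetical helper for result-reading code that wants to accept either a bare or a gpt_-prefixed metric key (the bare spellings are an assumption here, not taken from the diff):

def metric_column(columns, metric: str) -> str:
    # Accept either "coherence"-style or "gpt_coherence"-style keys when looking up a metric.
    for candidate in (metric, f"gpt_{metric}"):
        if candidate in columns:
            return candidate
    raise KeyError(metric)

print(metric_column(["gpt_coherence", "f1_score"], "coherence"))  # gpt_coherence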
azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/__init__.py

@@ -1,9 +1,8 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from .
+from .batch_run_context import BatchRunContext
 from .code_client import CodeClient
 from .proxy_client import ProxyClient
-from .target_run_context import TargetRunContext

-__all__ = ["CodeClient", "ProxyClient", "
+__all__ = ["CodeClient", "ProxyClient", "BatchRunContext"]
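Both the module and the context-manager class are renamed between these versions: 1.0.0b2 exposes BatchRunContext from _batch_run_client, while 1.0.0 exposes EvalRunContext from _batch_run. Code that has to run against either wheel could fall back at import time; this is an illustrative sketch only, since these are private modules:

try:
    # 1.0.0b2 layout
    from azure.ai.evaluation._evaluate._batch_run_client import BatchRunContext as RunContext
except ImportError:
    # 1.0.0 layout
    from azure.ai.evaluation._evaluate._batch_run import EvalRunContext as RunContext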
azure/ai/evaluation/_evaluate/{_batch_run/eval_run_context.py → _batch_run_client/batch_run_context.py}

@@ -2,8 +2,6 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
 import os
-import types
-from typing import Optional, Type, Union

 from promptflow._sdk._constants import PF_FLOW_ENTRY_IN_TMP, PF_FLOW_META_LOAD_IN_SUBPROCESS
 from promptflow._utils.user_agent_utils import ClientUserAgentUtil

@@ -14,7 +12,6 @@ from azure.ai.evaluation._constants import (
     OTEL_EXPORTER_OTLP_TRACES_TIMEOUT_DEFAULT,
     PF_BATCH_TIMEOUT_SEC,
     PF_BATCH_TIMEOUT_SEC_DEFAULT,
-    PF_DISABLE_TRACING,
 )

 from ..._user_agent import USER_AGENT

@@ -23,26 +20,22 @@ from .code_client import CodeClient
 from .proxy_client import ProxyClient


-class EvalRunContext:
-    """Context manager for
+class BatchRunContext:
+    """Context manager for batch run clients.

     :param client: The client to run in the context.
     :type client: Union[
-        ~azure.ai.evaluation._evaluate.
-        ~azure.ai.evaluation._evaluate.
+        ~azure.ai.evaluation._evaluate._batch_run_client.code_client.CodeClient,
+        ~azure.ai.evaluation._evaluate._batch_run_client.proxy_client.ProxyClient
     ]
     """

-    def __init__(self, client
+    def __init__(self, client) -> None:
         self.client = client
         self._is_batch_timeout_set_by_system = False
         self._is_otel_timeout_set_by_system = False
-        self._original_cwd = os.getcwd()
-
-    def __enter__(self) -> None:
-        # Preserve current working directory, as PF may change it without restoring it afterward
-        self._original_cwd = os.getcwd()

+    def __enter__(self):
         if isinstance(self.client, CodeClient):
             ClientUserAgentUtil.append_user_agent(USER_AGENT)
             inject_openai_api()

@@ -50,7 +43,6 @@ class EvalRunContext:
         if isinstance(self.client, ProxyClient):
             os.environ[PF_FLOW_ENTRY_IN_TMP] = "true"
             os.environ[PF_FLOW_META_LOAD_IN_SUBPROCESS] = "false"
-            os.environ[PF_DISABLE_TRACING] = "true"

             if os.environ.get(PF_BATCH_TIMEOUT_SEC) is None:
                 os.environ[PF_BATCH_TIMEOUT_SEC] = str(PF_BATCH_TIMEOUT_SEC_DEFAULT)

@@ -64,21 +56,13 @@ class EvalRunContext:
         # For addressing the issue of asyncio event loop closed on Windows
         set_event_loop_policy()

-    def __exit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_value: Optional[BaseException],
-        exc_tb: Optional[types.TracebackType],
-    ) -> None:
-        os.chdir(self._original_cwd)
-
+    def __exit__(self, exc_type, exc_val, exc_tb):
         if isinstance(self.client, CodeClient):
             recover_openai_api()

         if isinstance(self.client, ProxyClient):
             os.environ.pop(PF_FLOW_ENTRY_IN_TMP, None)
             os.environ.pop(PF_FLOW_META_LOAD_IN_SUBPROCESS, None)
-            os.environ.pop(PF_DISABLE_TRACING, None)

         if self._is_batch_timeout_set_by_system:
             os.environ.pop(PF_BATCH_TIMEOUT_SEC, None)