azure-ai-evaluation 1.10.0__py3-none-any.whl → 1.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/evaluation/_common/onedp/models/_models.py +5 -0
- azure/ai/evaluation/_converters/_ai_services.py +60 -10
- azure/ai/evaluation/_converters/_models.py +75 -26
- azure/ai/evaluation/_evaluate/_eval_run.py +14 -1
- azure/ai/evaluation/_evaluate/_evaluate.py +13 -4
- azure/ai/evaluation/_evaluate/_evaluate_aoai.py +104 -35
- azure/ai/evaluation/_evaluate/_utils.py +4 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +2 -1
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +113 -19
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +7 -2
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +1 -1
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +2 -1
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +113 -3
- azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +8 -2
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +2 -1
- azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +10 -2
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +2 -1
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +2 -1
- azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +8 -2
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +104 -60
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +58 -41
- azure/ai/evaluation/_exceptions.py +1 -0
- azure/ai/evaluation/_version.py +1 -1
- azure/ai/evaluation/red_team/__init__.py +2 -1
- azure/ai/evaluation/red_team/_attack_objective_generator.py +17 -0
- azure/ai/evaluation/red_team/_callback_chat_target.py +14 -1
- azure/ai/evaluation/red_team/_evaluation_processor.py +376 -0
- azure/ai/evaluation/red_team/_mlflow_integration.py +322 -0
- azure/ai/evaluation/red_team/_orchestrator_manager.py +661 -0
- azure/ai/evaluation/red_team/_red_team.py +697 -3067
- azure/ai/evaluation/red_team/_result_processor.py +610 -0
- azure/ai/evaluation/red_team/_utils/__init__.py +34 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +3 -1
- azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +6 -0
- azure/ai/evaluation/red_team/_utils/exception_utils.py +345 -0
- azure/ai/evaluation/red_team/_utils/file_utils.py +266 -0
- azure/ai/evaluation/red_team/_utils/formatting_utils.py +115 -13
- azure/ai/evaluation/red_team/_utils/metric_mapping.py +24 -4
- azure/ai/evaluation/red_team/_utils/progress_utils.py +252 -0
- azure/ai/evaluation/red_team/_utils/retry_utils.py +218 -0
- azure/ai/evaluation/red_team/_utils/strategy_utils.py +17 -4
- azure/ai/evaluation/simulator/_adversarial_simulator.py +9 -0
- azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +19 -5
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +4 -3
- {azure_ai_evaluation-1.10.0.dist-info → azure_ai_evaluation-1.11.1.dist-info}/METADATA +39 -3
- {azure_ai_evaluation-1.10.0.dist-info → azure_ai_evaluation-1.11.1.dist-info}/RECORD +49 -41
- {azure_ai_evaluation-1.10.0.dist-info → azure_ai_evaluation-1.11.1.dist-info}/WHEEL +1 -1
- {azure_ai_evaluation-1.10.0.dist-info → azure_ai_evaluation-1.11.1.dist-info/licenses}/NOTICE.txt +0 -0
- {azure_ai_evaluation-1.10.0.dist-info → azure_ai_evaluation-1.11.1.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_common/onedp/models/_models.py

```diff
@@ -1961,12 +1961,16 @@ class Message(_Model):
     :vartype role: str
     :ivar content: The content.
     :vartype content: str
+    :ivar context: The context.
+    :vartype context: str
     """

     role: Optional[str] = rest_field(name="Role", visibility=["read", "create", "update", "delete", "query"])
     """The role."""
     content: Optional[str] = rest_field(name="Content", visibility=["read", "create", "update", "delete", "query"])
     """The content."""
+    context: Optional[str] = rest_field(name="Context", visibility=["read", "create", "update", "delete", "query"])
+    """The context."""

     @overload
     def __init__(
@@ -1974,6 +1978,7 @@ class Message(_Model):
         *,
         role: Optional[str] = None,
         content: Optional[str] = None,
+        context: Optional[str] = None,
     ) -> None: ...

     @overload
```
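The new optional `context` field sits alongside `role` and `content` on the wire model. A minimal sketch of constructing the updated model (the import path is an assumption based on the file list above, and this is a private generated module, so treat it as illustrative only):

```python
# Sketch only: Message gained an optional `context` field in this release.
# Import path assumed from the file list above; the module is private/generated.
from azure.ai.evaluation._common.onedp.models._models import Message

msg = Message(
    role="user",
    content="What is the capital of France?",
    context="Retrieved passage: Paris is the capital of France.",  # new field
)
print(msg.role, msg.content, msg.context)
```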
azure/ai/evaluation/_converters/_ai_services.py

```diff
@@ -11,7 +11,18 @@ from azure.ai.evaluation._common._experimental import experimental
 from packaging.version import Version

 # Constants.
-from ._models import
+from ._models import (
+    _USER,
+    _AGENT,
+    _TOOL,
+    _TOOL_CALL,
+    _TOOL_CALLS,
+    _FUNCTION,
+    _BUILT_IN_DESCRIPTIONS,
+    _BUILT_IN_PARAMS,
+    _OPENAPI,
+    OpenAPIToolDefinition,
+)

 # Message instances.
 from ._models import Message, SystemMessage, UserMessage, AssistantMessage, ToolCall
@@ -93,7 +104,7 @@ class AIAgentConverter:
         return tool_calls_chronological

     @staticmethod
-    def _extract_function_tool_definitions(thread_run: object) -> List[ToolDefinition]:
+    def _extract_function_tool_definitions(thread_run: object) -> List[Union[ToolDefinition, OpenAPIToolDefinition]]:
         """
         Extracts tool definitions from a thread run.

@@ -121,6 +132,26 @@ class AIAgentConverter:
                         parameters=parameters,
                     )
                 )
+            elif tool.type == _OPENAPI:
+                openapi_tool = tool.openapi
+                tool_definition = OpenAPIToolDefinition(
+                    name=openapi_tool.name,
+                    description=openapi_tool.description,
+                    type=_OPENAPI,
+                    spec=openapi_tool.spec,
+                    auth=openapi_tool.auth.as_dict(),
+                    default_params=openapi_tool.default_params.as_dict() if openapi_tool.default_params else None,
+                    functions=[
+                        ToolDefinition(
+                            name=func.get("name"),
+                            description=func.get("description"),
+                            parameters=func.get("parameters"),
+                            type="function",
+                        )
+                        for func in openapi_tool.get("functions")
+                    ],
+                )
+                final_tools.append(tool_definition)
             else:
                 # Add limited support for built-in tools. Descriptions and parameters
                 # are not published, but we'll include placeholders.
@@ -243,16 +274,30 @@ class AIAgentConverter:
             if len(single_turn.content) < 1:
                 continue

-
-            content
-
-            "text":
-
+            content_list = []
+            # If content is a list, process all content items.
+            for content_item in single_turn.content:
+                if content_item.type == "text":
+                    content_list.append(
+                        {
+                            "type": "text",
+                            "text": content_item.text.value,
+                        }
+                    )
+                elif content_item.type == "image":
+                    content_list.append(
+                        {
+                            "type": "image",
+                            "image": {
+                                "file_id": content_item.image_file.file_id,
+                            },
+                        }
+                    )

             # If we have a user message, then we save it as such and since it's a human message, there is no
             # run_id associated with it.
             if single_turn.role == _USER:
-                final_messages.append(UserMessage(content=
+                final_messages.append(UserMessage(content=content_list, createdAt=single_turn.created_at))
                 continue

             # In this case, we have an assistant message. Unfortunately, this would only have the user-facing
@@ -261,7 +306,7 @@ class AIAgentConverter:
             if single_turn.role == _AGENT:
                 # We are required to put the run_id in the assistant message.
                 final_messages.append(
-                    AssistantMessage(content=
+                    AssistantMessage(content=content_list, run_id=single_turn.run_id, createdAt=single_turn.created_at)
                 )
                 continue

```
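With this change a converted turn carries a list of typed content items instead of a single text entry, so multi-part messages (text plus image) survive conversion. As a rough illustration of the resulting shape (placeholder values; the field names come from the hunk above):

```python
# Illustrative shape of a converted multi-part user turn (not an API call).
content_list = [
    {"type": "text", "text": "Please describe this chart."},
    {"type": "image", "image": {"file_id": "assistant-file-123"}},  # hypothetical id
]
# The converter then wraps this as, e.g.,
# UserMessage(content=content_list, createdAt=single_turn.created_at).
```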
azure/ai/evaluation/_converters/_ai_services.py

```diff
@@ -791,6 +836,7 @@ class LegacyAgentDataRetriever(AIAgentDataRetriever):
                 limit=self._AI_SERVICES_API_MAX_LIMIT,
                 order="asc",
                 after=after,
+                include=["step_details.tool_calls[*].file_search.results[*].content"],
             )
             has_more = run_steps.has_more
             after = run_steps.last_id
@@ -838,7 +884,11 @@ class FDPAgentDataRetriever(AIAgentDataRetriever):
     def _list_run_steps_chronological(self, thread_id: str, run_id: str):

         return self.project_client.agents.run_steps.list(
-            thread_id=thread_id,
+            thread_id=thread_id,
+            run_id=run_id,
+            limit=self._AI_SERVICES_API_MAX_LIMIT,
+            order="asc",
+            include=["step_details.tool_calls[*].file_search.results[*].content"],
         )

     def _list_run_ids_chronological(self, thread_id: str) -> List[str]:
```
azure/ai/evaluation/_converters/_models.py

```diff
@@ -3,17 +3,31 @@ import json

 from pydantic import BaseModel

-from typing import List, Optional, Union
+from typing import TYPE_CHECKING, Any, List, Optional, Union

 # Models moved in a later version of agents SDK, so try a few different locations
-
-
-
-
-
-
-
-
+# Only import for type checking to avoid runtime import errors
+if TYPE_CHECKING:
+    try:
+        from azure.ai.projects.models import RunStepFunctionToolCall
+    except ImportError:
+        try:
+            from azure.ai.agents.models import RunStepFunctionToolCall
+        except ImportError:
+            # Create a protocol for type checking when the real class isn't available
+            from typing import Protocol
+
+            class RunStepFunctionToolCall(Protocol):
+                """Protocol defining the expected interface for RunStepFunctionToolCall."""
+
+                id: str
+                type: str
+
+                def get(self, key: str, default: Any = None) -> Any: ...
+
+else:
+    # At runtime, we don't need the actual class since it's only used in type annotations
+    RunStepFunctionToolCall = Any

 # Message roles constants.
 _SYSTEM = "system"
```
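The block above is the standard `TYPE_CHECKING` fallback idiom: type checkers resolve the real class (or a `Protocol` stand-in), while at runtime the name is bound to `Any`, so neither agents SDK has to be installed. A self-contained sketch of the same idiom with a hypothetical optional dependency:

```python
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Seen only by type checkers; never executed at runtime.
    from some_optional_sdk import HeavyModel  # hypothetical optional dependency
else:
    HeavyModel = Any  # annotations still resolve when the SDK is absent


def describe(model: "HeavyModel") -> str:
    # Runs fine even when some_optional_sdk is not installed.
    return f"model: {model!r}"


print(describe(object()))
```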
azure/ai/evaluation/_converters/_models.py

```diff
@@ -33,9 +47,12 @@ _TOOL_CALLS = "tool_calls"
 # Constants to only be used internally in this file for the built-in tools.
 _CODE_INTERPRETER = "code_interpreter"
 _BING_GROUNDING = "bing_grounding"
+_BING_CUSTOM_SEARCH = "bing_custom_search"
 _FILE_SEARCH = "file_search"
 _AZURE_AI_SEARCH = "azure_ai_search"
+_SHAREPOINT_GROUNDING = "sharepoint_grounding"
 _FABRIC_DATAAGENT = "fabric_dataagent"
+_OPENAPI = "openapi"

 # Built-in tool descriptions and parameters are hidden, but we include basic descriptions
 # for evaluation purposes.
@@ -44,8 +61,10 @@ _BUILT_IN_DESCRIPTIONS = {
     + "generate code, and create graphs and charts using your data. Supports "
     + "up to 20 files.",
     _BING_GROUNDING: "Enhance model output with web data.",
-
+    _BING_CUSTOM_SEARCH: "Enables agents to retrieve content from a curated subset of websites, enhancing relevance and reducing noise from public web searches.",
+    _FILE_SEARCH: "Search for data across uploaded files. A single call can return multiple results/files in the 'results' field.",
     _AZURE_AI_SEARCH: "Search an Azure AI Search index for relevant data.",
+    _SHAREPOINT_GROUNDING: "Allows agents to access and retrieve relevant content from Microsoft SharePoint document libraries, grounding responses in organizational knowledge.",
     _FABRIC_DATAAGENT: "Connect to Microsoft Fabric data agents to retrieve data across different data sources.",
 }

@@ -59,6 +78,15 @@ _BUILT_IN_PARAMS = {
         "type": "object",
         "properties": {"requesturl": {"type": "string", "description": "URL used in Bing Search API."}},
     },
+    _BING_CUSTOM_SEARCH: {
+        "type": "object",
+        "properties": {
+            "requesturl": {
+                "type": "string",
+                "description": "Search queries, along with pre-configured site restrictions or domain filters.",
+            }
+        },
+    },
     _FILE_SEARCH: {
         "type": "object",
         "properties": {
@@ -76,6 +104,12 @@ _BUILT_IN_PARAMS = {
         "type": "object",
         "properties": {"input": {"type": "string", "description": "Search terms to use."}},
     },
+    _SHAREPOINT_GROUNDING: {
+        "type": "object",
+        "properties": {
+            "input": {"type": "string", "description": "A natural language query to search SharePoint content."}
+        },
+    },
     _FABRIC_DATAAGENT: {
         "type": "object",
         "properties": {"input": {"type": "string", "description": "Search terms to use."}},
@@ -217,6 +251,27 @@ class ToolDefinition(BaseModel):
     parameters: dict


+class OpenAPIToolDefinition(BaseModel):
+    """Represents OpenAPI tool definition that will be used in the agent.
+    :param name: The name of the tool.
+    :type name: str
+    :param type: The type of the tool.
+    :type type: str
+    :param description: A description of the tool.
+    :type description: str
+    :param parameters: The parameters required by the tool.
+    :type parameters: dict
+    """
+
+    name: str
+    type: str
+    description: Optional[str] = None
+    spec: object
+    auth: object
+    default_params: Optional[list[str]] = None
+    functions: list[ToolDefinition]
+
+
 class ToolCall:
     """Represents a tool call, used as an intermediate step in the conversion process.

```
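Because `OpenAPIToolDefinition` is a plain pydantic model, it can be constructed directly; in the package itself, `AIAgentConverter._extract_function_tool_definitions` fills it from the agent's OpenAPI tool. A minimal sketch with placeholder spec and auth payloads (the private import path is an assumption):

```python
# Sketch: hand-building the new model with placeholder values.
from azure.ai.evaluation._converters._models import OpenAPIToolDefinition, ToolDefinition

weather_tool = OpenAPIToolDefinition(
    name="weather_api",
    type="openapi",
    description="Look up current weather.",
    spec={"openapi": "3.0.0", "paths": {}},  # placeholder OpenAPI document
    auth={"type": "anonymous"},  # placeholder auth block
    functions=[
        ToolDefinition(
            name="get_weather",
            type="function",
            description="Get weather for a city.",
            parameters={"type": "object", "properties": {"city": {"type": "string"}}},
        )
    ],
)
print(weather_tool)
```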
azure/ai/evaluation/_converters/_models.py

```diff
@@ -247,7 +302,7 @@ class EvaluatorData(BaseModel):

     query: List[Message]
     response: List[Message]
-    tool_definitions: List[ToolDefinition]
+    tool_definitions: List[Union[ToolDefinition, OpenAPIToolDefinition]]

     def to_json(self):
         """Converts the result to a JSON string.
@@ -277,14 +332,16 @@ def break_tool_call_into_messages(tool_call: ToolCall, run_id: str) -> List[Mess
     # all in most of the cases, and bing would only show the API URL, without arguments or results.
     # Bing grounding would have "bing_grounding" in details with "requesturl" that will just be the API path with query.
     # TODO: Work with AI Services to add converter support for BingGrounding and CodeInterpreter.
-    if hasattr(tool_call.details, _FUNCTION):
+    if hasattr(tool_call.details, _FUNCTION) or tool_call.details.get("function"):
         # This is the internals of the content object that will be included with the tool call.
         tool_call_id = tool_call.details.id
         content_tool_call = {
             "type": _TOOL_CALL,
             "tool_call_id": tool_call_id,
-            "name": tool_call.details.
-            "arguments": safe_loads(
+            "name": tool_call.details.get(_FUNCTION).get("name") if tool_call.details.get(_FUNCTION) else None,
+            "arguments": safe_loads(
+                tool_call.details.get(_FUNCTION).get("arguments") if tool_call.details.get(_FUNCTION) else None
+            ),
         }
     else:
         # Treat built-in tools separately. Object models may be unique so handle each case separately
@@ -322,27 +379,19 @@ def break_tool_call_into_messages(tool_call: ToolCall, run_id: str) -> List[Mess
     # assistant's action of calling the tool.
     messages.append(AssistantMessage(run_id=run_id, content=[to_dict(content_tool_call)], createdAt=tool_call.created))

-    if hasattr(tool_call.details, _FUNCTION):
-        output = safe_loads(tool_call.details.function["output"])
+    if hasattr(tool_call.details, _FUNCTION) or tool_call.details.get("function"):
+        output = safe_loads(tool_call.details.get("function")["output"])
     else:
         try:
             # Some built-ins may have output, others may not
             # Try to retrieve it, but if we don't find anything, skip adding the message
             # Just manually converting to dicts for easy serialization for now rather than custom serializers
             if tool_call.details.type == _CODE_INTERPRETER:
-                output = tool_call.details.code_interpreter.outputs
+                output = [result.as_dict() for result in tool_call.details.code_interpreter.outputs]
             elif tool_call.details.type == _BING_GROUNDING:
                 return messages  # not supported yet from bing grounding tool
             elif tool_call.details.type == _FILE_SEARCH:
-                output = [
-                    {
-                        "file_id": result.file_id,
-                        "file_name": result.file_name,
-                        "score": result.score,
-                        "content": result.content,
-                    }
-                    for result in tool_call.details.file_search.results
-                ]
+                output = [result.as_dict() for result in tool_call.details.file_search.results]
             elif tool_call.details.type == _AZURE_AI_SEARCH:
                 output = tool_call.details.azure_ai_search["output"]
             elif tool_call.details.type == _FABRIC_DATAAGENT:
```
azure/ai/evaluation/_evaluate/_eval_run.py

```diff
@@ -81,6 +81,8 @@ class EvalRun(contextlib.AbstractContextManager):  # pylint: disable=too-many-in
         ~azure.ai.evaluation._promptflow.azure._lite_azure_management_client.LiteMLClient
     :param promptflow_run: The promptflow run used by the
     :type promptflow_run: Optional[promptflow._sdk.entities.Run]
+    :param tags: A dictionary of tags to be added to the evaluation run for tracking and organization purposes.
+    :type tags: Optional[Dict[str, str]]
     """

     _MAX_RETRIES = 5
@@ -98,6 +100,7 @@ class EvalRun(contextlib.AbstractContextManager):  # pylint: disable=too-many-in
         workspace_name: str,
         management_client: LiteMLClient,
         promptflow_run: Optional[Run] = None,
+        tags: Optional[Dict[str, str]] = None,
     ) -> None:
         self._tracking_uri: str = tracking_uri
         self._subscription_id: str = subscription_id
@@ -107,6 +110,7 @@ class EvalRun(contextlib.AbstractContextManager):  # pylint: disable=too-many-in
         self._is_promptflow_run: bool = promptflow_run is not None
         self._run_name = run_name
         self._promptflow_run = promptflow_run
+        self._tags = tags or {}
         self._status = RunStatus.NOT_STARTED
         self._url_base: Optional[str] = None
         self._info: Optional[RunInfo] = None
@@ -173,11 +177,20 @@ class EvalRun(contextlib.AbstractContextManager):  # pylint: disable=too-many-in
             )
         else:
             url = f"https://{self._url_base}/mlflow/v2.0" f"{self._get_scope()}/api/2.0/mlflow/runs/create"
+
+            # Prepare tags: start with user tags, ensure mlflow.user is set
+            run_tags = self._tags.copy()
+            if "mlflow.user" not in run_tags:
+                run_tags["mlflow.user"] = "azure-ai-evaluation"
+
+            # Convert tags to MLflow format
+            tags_list = [{"key": key, "value": value} for key, value in run_tags.items()]
+
             body = {
                 "experiment_id": "0",
                 "user_id": "azure-ai-evaluation",
                 "start_time": int(time.time() * 1000),
-                "tags":
+                "tags": tags_list,
             }
             if self._run_name:
                 body["run_name"] = self._run_name
```
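The added block is a plain dict-to-list reshaping into the tag format MLflow's `runs/create` endpoint expects, with a default `mlflow.user` tag injected when the caller did not set one. The same transformation in isolation:

```python
# Standalone sketch of the tag handling added to EvalRun's run-creation request.
user_tags = {"team": "nlp", "experiment": "baseline-v2"}  # example input

run_tags = user_tags.copy()
if "mlflow.user" not in run_tags:
    run_tags["mlflow.user"] = "azure-ai-evaluation"  # default owner tag

# MLflow's REST API takes tags as [{"key": ..., "value": ...}, ...]
tags_list = [{"key": key, "value": value} for key, value in run_tags.items()]
print(tags_list)
```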
azure/ai/evaluation/_evaluate/_evaluate.py

```diff
@@ -464,7 +464,7 @@ def _validate_columns_for_evaluators(
     )


-def _validate_and_load_data(target, data, evaluators, output_path, azure_ai_project, evaluation_name):
+def _validate_and_load_data(target, data, evaluators, output_path, azure_ai_project, evaluation_name, tags):
     if data is None:
         msg = "The 'data' parameter is required for evaluation."
         raise EvaluationException(
@@ -725,6 +725,7 @@ def evaluate(
     azure_ai_project: Optional[Union[str, AzureAIProject]] = None,
     output_path: Optional[Union[str, os.PathLike]] = None,
     fail_on_evaluator_errors: bool = False,
+    tags: Optional[Dict[str, str]] = None,
     **kwargs,
 ) -> EvaluationResult:
     """Evaluates target or data with built-in or custom evaluators. If both target and data are provided,
@@ -757,6 +758,10 @@ def evaluate(
         Defaults to false, which means that evaluations will continue regardless of failures.
         If such failures occur, metrics may be missing, and evidence of failures can be found in the evaluation's logs.
     :paramtype fail_on_evaluator_errors: bool
+    :keyword tags: A dictionary of tags to be added to the evaluation run for tracking and organization purposes.
+        Keys and values must be strings. For more information about tag limits, see:
+        https://learn.microsoft.com/en-us/azure/machine-learning/resource-limits-capacity?view=azureml-api-2#runs
+    :paramtype tags: Optional[Dict[str, str]]
     :keyword user_agent: A string to append to the default user-agent sent with evaluation http requests
     :paramtype user_agent: Optional[str]
     :return: Evaluation results.
```
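Taken together, the new keyword flows from the public `evaluate` entry point down to run creation. A hedged usage sketch (the data path and the toy evaluator are placeholders, not from the package):

```python
from azure.ai.evaluation import evaluate

def exact_match(*, response: str, ground_truth: str, **kwargs):
    # Toy row-level evaluator; any callable keyed off data columns works.
    return {"exact_match": float(response == ground_truth)}

result = evaluate(
    data="data.jsonl",  # placeholder path to a JSONL dataset
    evaluators={"exact_match": exact_match},
    tags={"team": "nlp", "run-type": "nightly"},  # new in this release
)
```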
azure/ai/evaluation/_evaluate/_evaluate.py

```diff
@@ -793,6 +798,7 @@ def evaluate(
             azure_ai_project=azure_ai_project,
             output_path=output_path,
             fail_on_evaluator_errors=fail_on_evaluator_errors,
+            tags=tags,
             **kwargs,
         )
     except Exception as e:
@@ -861,6 +867,7 @@ def _evaluate(  # pylint: disable=too-many-locals,too-many-statements
     azure_ai_project: Optional[Union[str, AzureAIProject]] = None,
     output_path: Optional[Union[str, os.PathLike]] = None,
     fail_on_evaluator_errors: bool = False,
+    tags: Optional[Dict[str, str]] = None,
     **kwargs,
 ) -> EvaluationResult:
     if fail_on_evaluator_errors:
@@ -877,6 +884,7 @@ def _evaluate(  # pylint: disable=too-many-locals,too-many-statements
         azure_ai_project=azure_ai_project,
         evaluation_name=evaluation_name,
         fail_on_evaluator_errors=fail_on_evaluator_errors,
+        tags=tags,
         **kwargs,
     )

@@ -956,7 +964,7 @@ def _evaluate(  # pylint: disable=too-many-locals,too-many-statements
         name_map = _map_names_to_builtins(evaluators, graders)
         if is_onedp_project(azure_ai_project):
             studio_url = _log_metrics_and_instance_results_onedp(
-                metrics, results_df, azure_ai_project, evaluation_name, name_map, **kwargs
+                metrics, results_df, azure_ai_project, evaluation_name, name_map, tags=tags, **kwargs
             )
         else:
             # Since tracing is disabled, pass None for target_run so a dummy evaluation run will be created each time.
@@ -964,7 +972,7 @@ def _evaluate(  # pylint: disable=too-many-locals,too-many-statements
             studio_url = None
             if trace_destination:
                 studio_url = _log_metrics_and_instance_results(
-                    metrics, results_df, trace_destination, None, evaluation_name, name_map, **kwargs
+                    metrics, results_df, trace_destination, None, evaluation_name, name_map, tags=tags, **kwargs
                 )

     result_df_dict = results_df.to_dict("records")
@@ -985,6 +993,7 @@ def _preprocess_data(
     azure_ai_project: Optional[Union[str, AzureAIProject]] = None,
     evaluation_name: Optional[str] = None,
     fail_on_evaluator_errors: bool = False,
+    tags: Optional[Dict[str, str]] = None,
     **kwargs,
 ) -> __ValidatedData:
     # Process evaluator config to replace ${target.} with ${data.}
@@ -992,7 +1001,7 @@ def _preprocess_data(
     evaluator_config = {}

     input_data_df = _validate_and_load_data(
-        target, data, evaluators_and_graders, output_path, azure_ai_project, evaluation_name
+        target, data, evaluators_and_graders, output_path, azure_ai_project, evaluation_name, tags
     )
     if target is not None:
         _validate_columns_for_target(input_data_df, target)
```