azure-ai-evaluation 1.1.0__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/evaluation/__init__.py +1 -15
- azure/ai/evaluation/_azure/_clients.py +24 -8
- azure/ai/evaluation/_azure/_models.py +2 -2
- azure/ai/evaluation/_common/utils.py +8 -8
- azure/ai/evaluation/_constants.py +21 -0
- azure/ai/evaluation/_evaluate/_batch_run/__init__.py +2 -1
- azure/ai/evaluation/_evaluate/_eval_run.py +3 -1
- azure/ai/evaluation/_evaluate/_evaluate.py +74 -14
- azure/ai/evaluation/_evaluate/_utils.py +27 -0
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +46 -25
- azure/ai/evaluation/_evaluators/_common/__init__.py +2 -0
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +69 -4
- azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +61 -0
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +7 -1
- azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +5 -42
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +2 -0
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +2 -0
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +2 -0
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +2 -0
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +61 -68
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +45 -23
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +55 -34
- azure/ai/evaluation/_evaluators/_qa/_qa.py +32 -27
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +44 -23
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +41 -81
- azure/ai/evaluation/_exceptions.py +0 -1
- azure/ai/evaluation/_safety_evaluation/__init__.py +3 -0
- azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +640 -0
- azure/ai/evaluation/_version.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +10 -3
- azure/ai/evaluation/simulator/_conversation/__init__.py +4 -5
- azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -0
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +2 -0
- azure/ai/evaluation/simulator/_simulator.py +21 -13
- {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/METADATA +77 -7
- {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/RECORD +40 -44
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +0 -20
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +0 -132
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +0 -55
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +0 -124
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +0 -100
- {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/NOTICE.txt +0 -0
- {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/WHEEL +0 -0
- {azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_multimodal/_violence.py

@@ -1,100 +0,0 @@
-# ---------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# ---------------------------------------------------------
-from promptflow._utils.async_utils import async_run_allowing_running_loop
-from azure.ai.evaluation._common._experimental import experimental
-from azure.ai.evaluation._common.constants import EvaluationMetrics
-from ._content_safety_multimodal_base import ContentSafetyMultimodalEvaluatorBase
-
-
-@experimental
-class ViolenceMultimodalEvaluator:
-    """
-    Initialize a violence multimodal evaluator for violence score.
-
-    :param credential: The credential for connecting to the Azure AI project. This is required.
-    :type credential: ~azure.core.credentials.TokenCredential
-    :param azure_ai_project: The scope of the Azure AI project,
-        which includes subscription ID, resource group name, and project name.
-    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
-
-    **Usage Example**
-
-    .. code-block:: python
-
-        azure_ai_project = {
-            "subscription_id": "<subscription_id>",
-            "resource_group_name": "<resource_group_name>",
-            "project_name": "<project_name>",
-        }
-        eval_fn = ViolenceMultimodalEvaluator(azure_ai_project)
-        result = eval_fn(conversation=
-            {
-                "messages": [
-                    {
-                        "role": "user",
-                        "content": [
-                            {
-                                "type": "text",
-                                "text": "What's in this image?"
-                            },
-                            {
-                                "type": "image_url",
-                                "image_url": {
-                                    "url": "<image url or base64 encoded image>"
-                                }
-                            }
-                        ]
-                    },
-                    {
-                        "role": "assistant",
-                        "content": "This picture shows an astronaut standing in the desert."
-                    }
-                ]
-            }
-        )
-
-    **Output Format**
-
-    .. code-block:: json
-
-        {
-            "violence": "High",
-            "violence_score": 6.5,
-            "violence_reason": "Some reason"
-        }
-
-    """
-
-    id = "violence_multimodal"
-    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
-
-    def __init__(self, credential, azure_ai_project):
-        self._async_evaluator = _AsyncViolenceMultimodalEvaluator(credential, azure_ai_project)
-
-    def __call__(self, *, conversation, **kwargs):
-        """
-        Evaluates violence content.
-
-        :keyword conversation: The conversation contains list of messages to be evaluated.
-            Each message should have "role" and "content" keys. It supports single turn only.
-        :paramtype conversation: ~azure.ai.evaluation.Conversation
-        :return: The violence score.
-        :rtype: Dict[str, Union[float, str]]
-        """
-        return async_run_allowing_running_loop(self._async_evaluator, conversation=conversation, **kwargs)
-
-    def _to_async(self):
-        return self._async_evaluator
-
-
-class _AsyncViolenceMultimodalEvaluator(ContentSafetyMultimodalEvaluatorBase):
-    def __init__(self, credential, azure_ai_project):
-        super().__init__(
-            metric=EvaluationMetrics.VIOLENCE,
-            credential=credential,
-            azure_ai_project=azure_ai_project,
-        )
-
-    async def __call__(self, *, conversation, **kwargs):
-        return await super().__call__(conversation=conversation, **kwargs)
{azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/NOTICE.txt: File without changes

{azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/WHEEL: File without changes

{azure_ai_evaluation-1.1.0.dist-info → azure_ai_evaluation-1.3.0.dist-info}/top_level.txt: File without changes