azure-ai-evaluation 1.0.0b2__py3-none-any.whl → 1.13.3__py3-none-any.whl
This diff shows the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
- azure/ai/evaluation/__init__.py +100 -5
- azure/ai/evaluation/{_evaluators/_chat → _aoai}/__init__.py +3 -2
- azure/ai/evaluation/_aoai/aoai_grader.py +140 -0
- azure/ai/evaluation/_aoai/label_grader.py +68 -0
- azure/ai/evaluation/_aoai/python_grader.py +86 -0
- azure/ai/evaluation/_aoai/score_model_grader.py +94 -0
- azure/ai/evaluation/_aoai/string_check_grader.py +66 -0
- azure/ai/evaluation/_aoai/text_similarity_grader.py +80 -0
- azure/ai/evaluation/_azure/__init__.py +3 -0
- azure/ai/evaluation/_azure/_clients.py +204 -0
- azure/ai/evaluation/_azure/_envs.py +207 -0
- azure/ai/evaluation/_azure/_models.py +227 -0
- azure/ai/evaluation/_azure/_token_manager.py +129 -0
- azure/ai/evaluation/_common/__init__.py +9 -1
- azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +24 -9
- azure/ai/evaluation/_common/constants.py +131 -2
- azure/ai/evaluation/_common/evaluation_onedp_client.py +169 -0
- azure/ai/evaluation/_common/math.py +89 -0
- azure/ai/evaluation/_common/onedp/__init__.py +32 -0
- azure/ai/evaluation/_common/onedp/_client.py +166 -0
- azure/ai/evaluation/_common/onedp/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/_model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/_serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_types.py +21 -0
- azure/ai/evaluation/_common/onedp/_utils/__init__.py +6 -0
- azure/ai/evaluation/_common/onedp/_utils/model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_utils/serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_validation.py +66 -0
- azure/ai/evaluation/_common/onedp/_vendor.py +50 -0
- azure/ai/evaluation/_common/onedp/_version.py +9 -0
- azure/ai/evaluation/_common/onedp/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/onedp/aio/_client.py +168 -0
- azure/ai/evaluation/_common/onedp/aio/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/aio/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +7143 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/models/__init__.py +358 -0
- azure/ai/evaluation/_common/onedp/models/_enums.py +447 -0
- azure/ai/evaluation/_common/onedp/models/_models.py +5963 -0
- azure/ai/evaluation/_common/onedp/models/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/operations/_operations.py +8951 -0
- azure/ai/evaluation/_common/onedp/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/py.typed +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/rai_service.py +831 -142
- azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
- azure/ai/evaluation/_common/raiclient/_client.py +128 -0
- azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
- azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
- azure/ai/evaluation/_common/raiclient/_version.py +9 -0
- azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
- azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
- azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
- azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
- azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/operations/_operations.py +1238 -0
- azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/py.typed +1 -0
- azure/ai/evaluation/_common/utils.py +870 -34
- azure/ai/evaluation/_constants.py +167 -6
- azure/ai/evaluation/_converters/__init__.py +3 -0
- azure/ai/evaluation/_converters/_ai_services.py +899 -0
- azure/ai/evaluation/_converters/_models.py +467 -0
- azure/ai/evaluation/_converters/_sk_services.py +495 -0
- azure/ai/evaluation/_eval_mapping.py +83 -0
- azure/ai/evaluation/_evaluate/_batch_run/__init__.py +17 -0
- azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +176 -0
- azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +47 -25
- azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +42 -13
- azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +124 -0
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +62 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +102 -59
- azure/ai/evaluation/_evaluate/_evaluate.py +2134 -311
- azure/ai/evaluation/_evaluate/_evaluate_aoai.py +992 -0
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +14 -99
- azure/ai/evaluation/_evaluate/_utils.py +289 -40
- azure/ai/evaluation/_evaluator_definition.py +76 -0
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +93 -42
- azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +119 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +117 -91
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -39
- azure/ai/evaluation/_evaluators/_common/__init__.py +15 -0
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +742 -0
- azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +63 -0
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +345 -0
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +198 -0
- azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
- azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -4
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -86
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +138 -57
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -55
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +133 -54
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +134 -54
- azure/ai/evaluation/_evaluators/_document_retrieval/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +442 -0
- azure/ai/evaluation/_evaluators/_eci/_eci.py +49 -56
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +102 -60
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +115 -92
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -41
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +90 -37
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +318 -82
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +114 -0
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +104 -0
- azure/ai/evaluation/{_evaluate/_batch_run_client → _evaluators/_intent_resolution}/__init__.py +3 -4
- azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +196 -0
- azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +275 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +107 -61
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +104 -77
- azure/ai/evaluation/_evaluators/_qa/_qa.py +115 -63
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +182 -98
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +178 -49
- azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +202 -0
- azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +84 -0
- azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +148 -0
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +189 -50
- azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +179 -0
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +102 -91
- azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -5
- azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +226 -0
- azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +101 -0
- azure/ai/evaluation/_evaluators/_task_completion/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_completion/_task_completion.py +177 -0
- azure/ai/evaluation/_evaluators/_task_completion/task_completion.prompty +220 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/_task_navigation_efficiency.py +384 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +298 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +166 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/_tool_input_accuracy.py +263 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/tool_input_accuracy.prompty +76 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/_tool_output_utilization.py +225 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/tool_output_utilization.prompty +221 -0
- azure/ai/evaluation/_evaluators/_tool_selection/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_selection/_tool_selection.py +266 -0
- azure/ai/evaluation/_evaluators/_tool_selection/tool_selection.prompty +104 -0
- azure/ai/evaluation/_evaluators/_tool_success/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_success/_tool_success.py +301 -0
- azure/ai/evaluation/_evaluators/_tool_success/tool_success.prompty +321 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +102 -0
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +109 -107
- azure/ai/evaluation/_exceptions.py +51 -7
- azure/ai/evaluation/_http_utils.py +210 -137
- azure/ai/evaluation/_legacy/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_adapters/__init__.py +7 -0
- azure/ai/evaluation/_legacy/_adapters/_check.py +17 -0
- azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
- azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
- azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
- azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
- azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
- azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
- azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
- azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
- azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
- azure/ai/evaluation/_legacy/_batch_engine/_config.py +48 -0
- azure/ai/evaluation/_legacy/_batch_engine/_engine.py +477 -0
- azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
- azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +132 -0
- azure/ai/evaluation/_legacy/_batch_engine/_result.py +107 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run.py +127 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +262 -0
- azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
- azure/ai/evaluation/_legacy/_batch_engine/_trace.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
- azure/ai/evaluation/_legacy/_common/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_common/_async_token_provider.py +117 -0
- azure/ai/evaluation/_legacy/_common/_logging.py +292 -0
- azure/ai/evaluation/_legacy/_common/_thread_pool_executor_with_context.py +17 -0
- azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
- azure/ai/evaluation/_legacy/prompty/_connection.py +119 -0
- azure/ai/evaluation/_legacy/prompty/_exceptions.py +139 -0
- azure/ai/evaluation/_legacy/prompty/_prompty.py +430 -0
- azure/ai/evaluation/_legacy/prompty/_utils.py +663 -0
- azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
- azure/ai/evaluation/_model_configurations.py +130 -8
- azure/ai/evaluation/_safety_evaluation/__init__.py +3 -0
- azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
- azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +917 -0
- azure/ai/evaluation/_user_agent.py +32 -1
- azure/ai/evaluation/_vendor/__init__.py +3 -0
- azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +324 -0
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +59 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +59 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
- azure/ai/evaluation/_version.py +2 -1
- azure/ai/evaluation/red_team/__init__.py +22 -0
- azure/ai/evaluation/red_team/_agent/__init__.py +3 -0
- azure/ai/evaluation/red_team/_agent/_agent_functions.py +261 -0
- azure/ai/evaluation/red_team/_agent/_agent_tools.py +461 -0
- azure/ai/evaluation/red_team/_agent/_agent_utils.py +89 -0
- azure/ai/evaluation/red_team/_agent/_semantic_kernel_plugin.py +228 -0
- azure/ai/evaluation/red_team/_attack_objective_generator.py +268 -0
- azure/ai/evaluation/red_team/_attack_strategy.py +49 -0
- azure/ai/evaluation/red_team/_callback_chat_target.py +115 -0
- azure/ai/evaluation/red_team/_default_converter.py +21 -0
- azure/ai/evaluation/red_team/_evaluation_processor.py +505 -0
- azure/ai/evaluation/red_team/_mlflow_integration.py +430 -0
- azure/ai/evaluation/red_team/_orchestrator_manager.py +803 -0
- azure/ai/evaluation/red_team/_red_team.py +1717 -0
- azure/ai/evaluation/red_team/_red_team_result.py +661 -0
- azure/ai/evaluation/red_team/_result_processor.py +1708 -0
- azure/ai/evaluation/red_team/_utils/__init__.py +37 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +128 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_target.py +601 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +114 -0
- azure/ai/evaluation/red_team/_utils/constants.py +72 -0
- azure/ai/evaluation/red_team/_utils/exception_utils.py +345 -0
- azure/ai/evaluation/red_team/_utils/file_utils.py +266 -0
- azure/ai/evaluation/red_team/_utils/formatting_utils.py +365 -0
- azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
- azure/ai/evaluation/red_team/_utils/metric_mapping.py +73 -0
- azure/ai/evaluation/red_team/_utils/objective_utils.py +46 -0
- azure/ai/evaluation/red_team/_utils/progress_utils.py +252 -0
- azure/ai/evaluation/red_team/_utils/retry_utils.py +218 -0
- azure/ai/evaluation/red_team/_utils/strategy_utils.py +218 -0
- azure/ai/evaluation/simulator/__init__.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +26 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +270 -144
- azure/ai/evaluation/simulator/_constants.py +12 -1
- azure/ai/evaluation/simulator/_conversation/__init__.py +151 -23
- azure/ai/evaluation/simulator/_conversation/_conversation.py +10 -6
- azure/ai/evaluation/simulator/_conversation/constants.py +1 -1
- azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +54 -75
- azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +1 -0
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +26 -5
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +145 -104
- azure/ai/evaluation/simulator/_model_tools/__init__.py +2 -1
- azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +225 -0
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +80 -30
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +117 -45
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +109 -7
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +97 -33
- azure/ai/evaluation/simulator/_model_tools/models.py +30 -27
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +6 -10
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -5
- azure/ai/evaluation/simulator/_simulator.py +302 -208
- azure/ai/evaluation/simulator/_utils.py +31 -13
- azure_ai_evaluation-1.13.3.dist-info/METADATA +939 -0
- azure_ai_evaluation-1.13.3.dist-info/RECORD +305 -0
- {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/WHEEL +1 -1
- azure_ai_evaluation-1.13.3.dist-info/licenses/NOTICE.txt +70 -0
- azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +0 -71
- azure/ai/evaluation/_evaluators/_chat/_chat.py +0 -357
- azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +0 -157
- azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +0 -48
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +0 -65
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -301
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -54
- azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +0 -5
- azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +0 -104
- azure/ai/evaluation/simulator/_tracing.py +0 -89
- azure_ai_evaluation-1.0.0b2.dist-info/METADATA +0 -449
- azure_ai_evaluation-1.0.0b2.dist-info/RECORD +0 -99
- {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/red_team/_red_team_result.py
@@ -0,0 +1,661 @@
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import Any, Dict, List, Optional, TypedDict
import json
from azure.ai.evaluation._common._experimental import experimental


# NOTE: The RedTeam* TypedDict classes below are being gradually deprecated in favor of
# the standard EvalRun* models from azure.ai.evaluation._common.onedp.models._models.
# New code should use EvalRunOutputItem instead of RedTeamOutputItem, etc.
# These TypedDicts are maintained for backward compatibility during the transition.


@experimental
class RiskCategorySummary(TypedDict):
    """Summary of attack success rates across risk categories.

    :param overall_asr: Overall attack success rate across all risk categories
    :type overall_asr: float
    :param overall_total: Total number of attacks across all risk categories
    :type overall_total: int
    :param overall_successful_attacks: Number of successful attacks across all risk categories
    :type overall_successful_attacks: int
    :param hate_unfairness_asr: Attack success rate for hate/unfairness risk category
    :type hate_unfairness_asr: float
    :param hate_unfairness_total: Total number of attacks for hate/unfairness risk category
    :type hate_unfairness_total: int
    :param hate_unfairness_successful_attacks: Number of successful attacks for hate/unfairness risk category
    :type hate_unfairness_successful_attacks: int
    :param violence_asr: Attack success rate for violence risk category
    :type violence_asr: float
    :param violence_total: Total number of attacks for violence risk category
    :type violence_total: int
    :param violence_successful_attacks: Number of successful attacks for violence risk category
    :type violence_successful_attacks: int
    :param sexual_asr: Attack success rate for sexual risk category
    :type sexual_asr: float
    :param sexual_total: Total number of attacks for sexual risk category
    :type sexual_total: int
    :param sexual_successful_attacks: Number of successful attacks for sexual risk category
    :type sexual_successful_attacks: int
    :param self_harm_asr: Attack success rate for self-harm risk category
    :type self_harm_asr: float
    :param self_harm_total: Total number of attacks for self-harm risk category
    :type self_harm_total: int
    :param self_harm_successful_attacks: Number of successful attacks for self-harm risk category
    :type self_harm_successful_attacks: int
    """

    overall_asr: float
    overall_total: int
    overall_successful_attacks: int
    hate_unfairness_asr: float
    hate_unfairness_total: int
    hate_unfairness_successful_attacks: int
    violence_asr: float
    violence_total: int
    violence_successful_attacks: int
    sexual_asr: float
    sexual_total: int
    sexual_successful_attacks: int
    self_harm_asr: float
    self_harm_total: int
    self_harm_successful_attacks: int


@experimental
class AttackTechniqueSummary(TypedDict):
    """Summary of attack success rates across complexity levels.

    :param overall_asr: Overall attack success rate across all complexity levels
    :type overall_asr: float
    :param overall_total: Total number of attacks across all complexity levels
    :type overall_total: int
    :param overall_successful_attacks: Number of successful attacks across all complexity levels
    :type overall_successful_attacks: int
    :param baseline_asr: Attack success rate for baseline complexity level
    :type baseline_asr: float
    :param baseline_total: Total number of attacks for baseline complexity level
    :type baseline_total: int
    :param baseline_successful_attacks: Number of successful attacks for baseline complexity level
    :type baseline_successful_attacks: int
    :param easy_complexity_asr: Attack success rate for easy complexity level
    :type easy_complexity_asr: float
    :param easy_complexity_total: Total number of attacks for easy complexity level
    :type easy_complexity_total: int
    :param easy_complexity_successful_attacks: Number of successful attacks for easy complexity level
    :type easy_complexity_successful_attacks: int
    :param moderate_complexity_asr: Attack success rate for moderate complexity level
    :type moderate_complexity_asr: float
    :param moderate_complexity_total: Total number of attacks for moderate complexity level
    :type moderate_complexity_total: int
    :param moderate_complexity_successful_attacks: Number of successful attacks for moderate complexity level
    :type moderate_complexity_successful_attacks: int
    :param difficult_complexity_asr: Attack success rate for difficult complexity level
    :type difficult_complexity_asr: float
    :param difficult_complexity_total: Total number of attacks for difficult complexity level
    :type difficult_complexity_total: int
    :param difficult_complexity_successful_attacks: Number of successful attacks for difficult complexity level
    :type difficult_complexity_successful_attacks: int
    """

    overall_asr: float
    overall_total: int
    overall_successful_attacks: int
    baseline_asr: float
    baseline_total: int
    baseline_successful_attacks: int
    easy_complexity_asr: float
    easy_complexity_total: int
    easy_complexity_successful_attacks: int
    moderate_complexity_asr: float
    moderate_complexity_total: int
    moderate_complexity_successful_attacks: int
    difficult_complexity_asr: float
    difficult_complexity_total: int
    difficult_complexity_successful_attacks: int


@experimental
class JointRiskAttackSummaryItem(TypedDict):
    """Summary of attack success rates for a specific risk category across complexity levels.

    :param risk_category: The risk category being summarized
    :type risk_category: str
    :param baseline_asr: Attack success rate for baseline complexity level
    :type baseline_asr: float
    :param easy_complexity_asr: Attack success rate for easy complexity level
    :type easy_complexity_asr: float
    :param moderate_complexity_asr: Attack success rate for moderate complexity level
    :type moderate_complexity_asr: float
    :param difficult_complexity_asr: Attack success rate for difficult complexity level
    :type difficult_complexity_asr: float
    """

    risk_category: str
    baseline_asr: float
    easy_complexity_asr: float
    moderate_complexity_asr: float
    difficult_complexity_asr: float


@experimental
class RedTeamingScorecard(TypedDict):
    """TypedDict representation of a Red Team Agent scorecard with the updated structure.

    :param risk_category_summary: Overall metrics by risk category
    :type risk_category_summary: List[RiskCategorySummary]
    :param attack_technique_summary: Overall metrics by attack technique complexity
    :type attack_technique_summary: List[AttackTechniqueSummary]
    :param joint_risk_attack_summary: Detailed metrics by risk category and complexity level
    :type joint_risk_attack_summary: List[JointRiskAttackSummaryItem]
    :param detailed_joint_risk_attack_asr: Detailed ASR information broken down by complexity level, risk category, and converter
    :type detailed_joint_risk_attack_asr: Dict[str, Dict[str, Dict[str, float]]]
    """

    risk_category_summary: List[RiskCategorySummary]
    attack_technique_summary: List[AttackTechniqueSummary]
    joint_risk_attack_summary: List[JointRiskAttackSummaryItem]
    detailed_joint_risk_attack_asr: Dict[str, Dict[str, Dict[str, float]]]


@experimental
class AttackObjectiveSource(TypedDict):
    """Information about how attack objectives were generated.

    :param application_scenario: The application scenario used for generating attack objectives
    :type application_scenario: str
    :param risk_categories: List of risk categories targeted by the attack objectives
    :type risk_categories: List[str]
    :param custom_attack_seed_prompts: Custom prompts used to seed attack objective generation
    :type custom_attack_seed_prompts: str
    :param policy_document: Policy document used for generating attack objectives
    :type policy_document: str
    """

    application_scenario: str
    risk_categories: List[str]
    custom_attack_seed_prompts: str
    policy_document: str


@experimental
class RedTeamingParameters(TypedDict):
    """TypedDict representation of parameters used in a red team evaluation with the updated structure.

    :param attack_objective_generated_from: Information about the source of attack objectives
    :type attack_objective_generated_from: AttackObjectiveSource
    :param attack_complexity: List of complexity levels used in the attacks
    :type attack_complexity: List[str]
    :param techniques_used: Dictionary mapping complexity levels to lists of converter techniques used
    :type techniques_used: Dict[str, List[str]]
    :param attack_success_thresholds: Thresholds used for determining attack success per risk category
    :type attack_success_thresholds: Dict[str, Dict[str, int]]
    """

    attack_objective_generated_from: AttackObjectiveSource
    attack_complexity: List[str]
    techniques_used: Dict[str, List[str]]
    attack_success_thresholds: Dict[str, Dict[str, int]]


@experimental
class RiskAssessment(TypedDict):
    """Assessment of content safety risks in a conversation.

    :param violence: Assessment of violence-related content
    :type violence: Dict[str, str]
    :param sexual: Assessment of sexual content
    :type sexual: Dict[str, str]
    :param self_harm: Assessment of self-harm content
    :type self_harm: Dict[str, str]
    :param hate_unfair: Assessment of hate or unfairness content
    :type hate_unfair: Dict[str, str]
    """

    violence: Dict[str, str]
    sexual: Dict[str, str]
    self_harm: Dict[str, str]
    hate_unfair: Dict[str, str]


@experimental
class AttackDetails(TypedDict, total=False):
    """TypedDict representation of a simulated conversation in a red team evaluation.

    :param attack_success: Whether the attack was successful
    :type attack_success: Optional[bool]
    :param attack_technique: The attack technique used in the conversation
    :type attack_technique: str
    :param attack_complexity: The complexity level of the attack
    :type attack_complexity: str
    :param risk_category: The risk category targeted by the attack
    :type risk_category: str
    :param conversation: List of messages exchanged in the conversation
    :type conversation: List[Dict[str, str]]
    :param risk_assessment: Dictionary containing content safety assessment for the conversation
    :type risk_assessment: Optional[RiskAssessment]
    :param attack_success_threshold: The threshold value used to determine attack success
    :type attack_success_threshold: Optional[int]
    :param risk_sub_type: Optional risk sub-category/sub-type for the attack
    :type risk_sub_type: Optional[str]
    """

    attack_success: Optional[bool]
    attack_technique: str
    attack_complexity: str
    risk_category: str
    conversation: List[Dict[str, str]]
    risk_assessment: Optional[RiskAssessment]
    attack_success_threshold: Optional[int]
    risk_sub_type: Optional[str]


@experimental
class RedTeamOutputResultProperties(TypedDict, total=False):
    """Additional metadata captured for each evaluation result."""

    attack_success: Optional[bool]
    attack_success_threshold: Optional[int]
    attack_technique: str
    attack_complexity: str
    risk_category: str
    risk_assessment: Optional[Dict[str, Any]]
    reason: Optional[str]
    severity_label: Optional[str]
    metadata: Optional[Dict[str, Any]]


@experimental
class EvaluationRunOutputItemMessage(TypedDict, total=False):
    """Representation of a single message within an evaluation sample."""

    role: str
    content: Any
    name: Optional[str]
    tool_calls: Optional[List[Dict[str, Any]]]


@experimental
class RedTeamRunOutputItemResult(TypedDict, total=False):
    """Flattened evaluation result for a single risk category.

    :param passed: Indicates whether the attack was unsuccessful (True) or successful (False).
        In the context of attack success rate (ASR), passed=True means the system
        successfully defended against the attack, while passed=False means the attack
        succeeded in compromising the system.
    :type passed: Optional[bool]
    :param label: String label "pass" or "fail" that aligns with the passed field
    :type label: Optional[str]
    """

    # Should extend EvaluationRunOutputItemResult

    object: str
    type: str
    name: str
    passed: Optional[bool]
    label: Optional[str]
    score: Optional[float]
    metric: Optional[str]
    threshold: Optional[float]
    reason: Optional[str]
    properties: RedTeamOutputResultProperties


@experimental
class RedTeamDatasourceItem(TypedDict, total=False):
    """Metadata about the datasource item that produced this conversation."""

    id: Optional[str]
    input_data: Dict[str, Any]


@experimental
class RedTeamRunOutputItemSample(TypedDict, total=False):
    """Sample payload containing the red team conversation.

    :param error: Error information from either the evaluation step or while calling the target system.
        Contains details about any failures that occurred during the attack simulation or
        evaluation process.
    :type error: Optional[Dict[str, Any]]
    """

    # Should extend EvaluationRunOutputItemSample

    object: str
    input: List[EvaluationRunOutputItemMessage]
    output: List[EvaluationRunOutputItemMessage]
    finish_reason: Optional[str]
    model: Optional[str]
    error: Optional[Dict[str, Any]]
    usage: Optional[Dict[str, Any]]
    seed: Optional[int]
    temperature: Optional[float]
    top_p: Optional[float]
    max_completion_tokens: Optional[float]
    metadata: Optional[Dict[str, Any]]


@experimental
class RedTeamOutputItem(TypedDict, total=False):
    """Structured representation of a conversation and its evaluation artifacts.

    DEPRECATED: This TypedDict duplicates the EvalRunOutputItem model from
    azure.ai.evaluation._common.onedp.models._models. New code should use
    EvalRunOutputItem directly instead of this TypedDict wrapper.
    """

    object: str
    id: str
    created_time: int
    status: str
    datasource_item_id: Optional[str]
    datasource_item: Optional[RedTeamDatasourceItem]
    sample: RedTeamRunOutputItemSample
    results: List[RedTeamRunOutputItemResult]


@experimental
class ScanResult(TypedDict):
    """TypedDict representation of a Red Team Agent evaluation result with the updated structure.

    :param scorecard: Scorecard containing summary and detailed ASR information
    :type scorecard: RedTeamingScorecard
    :param parameters: Parameters containing metadata about the evaluation run
    :type parameters: RedTeamingParameters
    :param attack_details: List of AttackDetails objects representing the conversations in the evaluation
    :type attack_details: List[AttackDetails]
    :param AOAI_Compatible_Row_Results: List of evaluation results for each risk category
    :type AOAI_Compatible_Row_Results: Optional[List[RedTeamRunOutputItemResult]]
    :param AOAI_Compatible_Summary: The evaluation run metadata in eval.run format
    :type AOAI_Compatible_Summary: Optional[RedTeamRun]
    :param studio_url: Optional URL for the studio
    :type studio_url: Optional[str]
    """

    scorecard: RedTeamingScorecard
    parameters: RedTeamingParameters
    attack_details: List[AttackDetails]
    AOAI_Compatible_Row_Results: Optional[List[RedTeamRunOutputItemResult]]
    AOAI_Compatible_Summary: Optional["RedTeamRun"]
    studio_url: Optional[str]


@experimental
class ResultCount(TypedDict):
    """Count of evaluation results by status.

    :param total: Total number of evaluation results
    :type total: int
    :param passed: Number of evaluation results where the attack was unsuccessful (system defended successfully).
        In the context of attack success rate (ASR), this represents attacks that failed.
    :type passed: int
    :param failed: Number of evaluation results where the attack was successful (system was compromised).
        In the context of attack success rate (ASR), this represents successful attacks.
    :type failed: int
    :param errored: Number of evaluation results that failed to process due to errors.
        Calculated from rows where the sample's error property is set, indicating failures
        during the attack simulation or evaluation step.
    :type errored: int
    """

    total: int
    passed: int
    failed: int
    errored: int


@experimental
class PerTestingCriteriaResult(TypedDict, total=False):
    """Result count for a specific testing criteria.

    :param testing_criteria: The name of the testing criteria (e.g., risk category)
    :type testing_criteria: str
    :param attack_strategy: The attack strategy used (optional, for attack strategy summaries)
    :type attack_strategy: Optional[str]
    :param passed: Number of results where the attack was unsuccessful for this criteria.
        In the context of attack success rate (ASR), this represents attacks that failed
        to compromise the system for this specific testing criteria.
    :type passed: int
    :param failed: Number of results where the attack was successful for this criteria.
        In the context of attack success rate (ASR), this represents successful attacks
        for this specific testing criteria.
    :type failed: int
    """

    testing_criteria: str
    attack_strategy: Optional[str]
    passed: int
    failed: int


@experimental
class DataSourceItemGenerationParams(TypedDict, total=False):
    """Parameters for data source item generation.

    :param type: Type of data source generation (e.g., "red_team")
    :type type: str
    :param attack_strategies: List of attack strategies used
    :type attack_strategies: List[str]
    :param num_turns: Number of turns in the conversation
    :type num_turns: int
    """

    type: str
    attack_strategies: List[str]
    num_turns: int


@experimental
class DataSource(TypedDict, total=False):
    """Data source information for the red team evaluation.

    :param type: Type of data source (e.g., "azure_ai_red_team")
    :type type: str
    :param target: Target configuration for the data source
    :type target: Dict[str, Any]
    :param item_generation_params: Parameters used for generating data items
    :type item_generation_params: DataSourceItemGenerationParams
    """

    type: str
    target: Dict[str, Any]
    item_generation_params: DataSourceItemGenerationParams


@experimental
class OutputItemsList(TypedDict):
    """Wrapper for list of output items.

    :param object: Object type identifier (always "list")
    :type object: str
    :param data: List of red team output items
    :type data: List[RedTeamOutputItem]
    """

    object: str
    data: List[RedTeamOutputItem]


@experimental
class RedTeamRun(TypedDict, total=False):
    """TypedDict representation of a Red Team evaluation run in eval.run format.

    :param object: Object type identifier (always "eval.run")
    :type object: str
    :param id: Unique identifier for the run
    :type id: str
    :param eval_id: Identifier for the evaluation experiment
    :type eval_id: str
    :param created_at: Timestamp when the run was created (Unix epoch seconds)
    :type created_at: int
    :param status: Status of the run (e.g., "completed", "failed", "in_progress")
    :type status: str
    :param name: Display name for the run
    :type name: str
    :param report_url: URL to view the run report in Azure AI Studio
    :type report_url: Optional[str]
    :param data_source: Information about the data source used for the evaluation
    :type data_source: DataSource
    :param metadata: Additional metadata for the run
    :type metadata: Dict[str, Any]
    :param result_counts: Aggregated counts of evaluation results
    :type result_counts: ResultCount
    :param per_model_usage: Usage statistics per model (if applicable)
    :type per_model_usage: List[Any]
    :param per_testing_criteria_results: Results aggregated by testing criteria
    :type per_testing_criteria_results: List[PerTestingCriteriaResult]
    :param output_items: Wrapped list of output items from the evaluation
    :type output_items: OutputItemsList
    :param conversations: Optional list of attack details/conversations
    :type conversations: List[AttackDetails]
    """

    object: str
    id: str
    eval_id: str
    created_at: int
    status: str
    name: str
    report_url: Optional[str]
    data_source: DataSource
    metadata: Dict[str, Any]
    result_counts: ResultCount
    per_model_usage: List[Any]
    per_testing_criteria_results: List[PerTestingCriteriaResult]
    output_items: OutputItemsList
    conversations: List[AttackDetails]


@experimental
class RedTeamResult:
    """Container for the scan result and attack details produced by a red team scan."""

    def __init__(self, scan_result: Optional[ScanResult] = None, attack_details: Optional[List[AttackDetails]] = None):
        self.scan_result = scan_result
        self.attack_details = attack_details

    def to_json(self) -> str:
        """
        Serializes the scan result of this RedTeamResult to JSON.

        :returns: A string containing the RedTeamResult in JSON format, or an empty string if no scan result is set.
        :rtype: str
        """
        return json.dumps(self.scan_result) if self.scan_result else ""

    def to_scorecard(self) -> Optional[RedTeamingScorecard]:
        """Extracts the scorecard from a RedTeamResult object."""
        return self.scan_result.get("scorecard", None) if self.scan_result else None

    def to_eval_qr_json_lines(self) -> str:
        """Converts conversations in messages format to query-response format suitable for evaluation.

        The output format follows the JSONL pattern with each line containing:

        .. code-block:: javascript

            {
                "query": "user message content",
                "response": "assistant message content",
                "risk_category": "risk category",
                "attack_strategy": "strategy name",
                "attack_complexity": "complexity level",
                "attack_success": "true|false", // (if available from evaluation)
                "category": "risk category", // (if available from evaluation)
                "severity_level": "low|medium|high", // (if available from evaluation)
                "threshold": "threshold value" // (if available from evaluation)
            }

        :returns: A string of newline-separated query-response pairs in JSONL format.
        :rtype: str
        """
        if not self.attack_details:
            return ""

        result_lines = []

        for conversation in self.attack_details:
            messages = conversation.get("conversation", [])
            attack_technique = conversation.get("attack_technique", "")
            attack_complexity = conversation.get("attack_complexity", "")
            risk_category = conversation.get("risk_category", "")
            attack_success = conversation.get("attack_success")
            risk_assessment = conversation.get("risk_assessment", {})

            # Walk the conversation pairing each user turn with the assistant turn that follows it.
            for i in range(0, len(messages) - 1, 2):
                if i + 1 < len(messages):
                    user_msg = messages[i]
                    assistant_msg = messages[i + 1]

                    if user_msg.get("role") == "user" and assistant_msg.get("role") == "assistant":
                        # Create the base query-response pair
                        qr_pair = {
                            "query": user_msg.get("content", ""),
                            "response": assistant_msg.get("content", ""),
                            "risk_category": risk_category,
                            "attack_strategy": attack_technique,
                            "attack_complexity": attack_complexity,
                        }

                        # Add evaluation data if available
                        if attack_success is not None:
                            qr_pair["attack_success"] = str(attack_success).lower()

                        # Add risk assessment data if available
                        for category, assessment in (risk_assessment or {}).items():
                            if assessment and assessment.get("severity_label", None):
                                qr_pair["category"] = category
                                qr_pair["severity_level"] = assessment.get("severity_label", "").lower()
                                # Add threshold if available in the future

                        result_lines.append(json.dumps(qr_pair))

        return "\n".join(result_lines)

    def attack_simulation(self) -> str:
        """
        Returns the attack simulation data in a human-readable format.

        :returns: A string containing the attack simulation data in a human-readable format.
        :rtype: str
        """
        if not self.attack_details:
            return ""

        result_lines = []

        for conversation in self.attack_details:
            messages = conversation.get("conversation", [])
            attack_technique = conversation.get("attack_technique", "")
            attack_complexity = conversation.get("attack_complexity", "")
            risk_category = conversation.get("risk_category", "")
            attack_success = conversation.get("attack_success")
            risk_assessment = conversation.get("risk_assessment", {})

            result_lines.append(f"Attack Technique: {attack_technique}")
            result_lines.append(f"Attack Complexity: {attack_complexity}")
            result_lines.append(f"Risk Category: {risk_category}")
            result_lines.append("")

            for i in range(0, len(messages) - 1, 2):
                if i + 1 < len(messages):
                    user_msg = messages[i]
                    assistant_msg = messages[i + 1]

                    if user_msg.get("role") == "user" and assistant_msg.get("role") == "assistant":
                        result_lines.append(f"User: {user_msg.get('content', '')}")
                        result_lines.append(f"Assistant: {assistant_msg.get('content', '')}")
                        result_lines.append("")

            if attack_success is not None:
                result_lines.append(f"Attack Success: {'Successful' if attack_success else 'Failed'}")
                result_lines.append("")

            for category, assessment in (risk_assessment or {}).items():
                if assessment and assessment.get("severity_label", None):
                    result_lines.append(f"Category: {category}")
                    result_lines.append(f"Severity Level: {assessment.get('severity_label', '')}")
                    result_lines.append("")

        return "\n".join(result_lines)