azure-ai-evaluation 1.7.0__py3-none-any.whl → 1.9.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (136)
  1. azure/ai/evaluation/__init__.py +13 -2
  2. azure/ai/evaluation/_aoai/__init__.py +1 -1
  3. azure/ai/evaluation/_aoai/aoai_grader.py +21 -11
  4. azure/ai/evaluation/_aoai/label_grader.py +3 -2
  5. azure/ai/evaluation/_aoai/score_model_grader.py +90 -0
  6. azure/ai/evaluation/_aoai/string_check_grader.py +3 -2
  7. azure/ai/evaluation/_aoai/text_similarity_grader.py +3 -2
  8. azure/ai/evaluation/_azure/_envs.py +9 -10
  9. azure/ai/evaluation/_azure/_token_manager.py +7 -1
  10. azure/ai/evaluation/_common/constants.py +11 -2
  11. azure/ai/evaluation/_common/evaluation_onedp_client.py +32 -26
  12. azure/ai/evaluation/_common/onedp/__init__.py +32 -32
  13. azure/ai/evaluation/_common/onedp/_client.py +136 -139
  14. azure/ai/evaluation/_common/onedp/_configuration.py +70 -73
  15. azure/ai/evaluation/_common/onedp/_patch.py +21 -21
  16. azure/ai/evaluation/_common/onedp/_utils/__init__.py +6 -0
  17. azure/ai/evaluation/_common/onedp/_utils/model_base.py +1232 -0
  18. azure/ai/evaluation/_common/onedp/_utils/serialization.py +2032 -0
  19. azure/ai/evaluation/_common/onedp/_validation.py +50 -50
  20. azure/ai/evaluation/_common/onedp/_version.py +9 -9
  21. azure/ai/evaluation/_common/onedp/aio/__init__.py +29 -29
  22. azure/ai/evaluation/_common/onedp/aio/_client.py +138 -143
  23. azure/ai/evaluation/_common/onedp/aio/_configuration.py +70 -75
  24. azure/ai/evaluation/_common/onedp/aio/_patch.py +21 -21
  25. azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +37 -39
  26. azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +4832 -4494
  27. azure/ai/evaluation/_common/onedp/aio/operations/_patch.py +21 -21
  28. azure/ai/evaluation/_common/onedp/models/__init__.py +168 -142
  29. azure/ai/evaluation/_common/onedp/models/_enums.py +230 -162
  30. azure/ai/evaluation/_common/onedp/models/_models.py +2685 -2228
  31. azure/ai/evaluation/_common/onedp/models/_patch.py +21 -21
  32. azure/ai/evaluation/_common/onedp/operations/__init__.py +37 -39
  33. azure/ai/evaluation/_common/onedp/operations/_operations.py +6106 -5655
  34. azure/ai/evaluation/_common/onedp/operations/_patch.py +21 -21
  35. azure/ai/evaluation/_common/rai_service.py +86 -50
  36. azure/ai/evaluation/_common/raiclient/__init__.py +1 -1
  37. azure/ai/evaluation/_common/raiclient/operations/_operations.py +14 -1
  38. azure/ai/evaluation/_common/utils.py +124 -3
  39. azure/ai/evaluation/_constants.py +2 -1
  40. azure/ai/evaluation/_converters/__init__.py +1 -1
  41. azure/ai/evaluation/_converters/_ai_services.py +9 -8
  42. azure/ai/evaluation/_converters/_models.py +46 -0
  43. azure/ai/evaluation/_converters/_sk_services.py +495 -0
  44. azure/ai/evaluation/_eval_mapping.py +2 -2
  45. azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +4 -4
  46. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +2 -2
  47. azure/ai/evaluation/_evaluate/_evaluate.py +64 -58
  48. azure/ai/evaluation/_evaluate/_evaluate_aoai.py +130 -89
  49. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +0 -1
  50. azure/ai/evaluation/_evaluate/_utils.py +24 -15
  51. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +3 -3
  52. azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +12 -11
  53. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +5 -5
  54. azure/ai/evaluation/_evaluators/_common/_base_eval.py +15 -5
  55. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +24 -9
  56. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +6 -1
  57. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +13 -13
  58. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +7 -7
  59. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +7 -7
  60. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +7 -7
  61. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +6 -6
  62. azure/ai/evaluation/_evaluators/_document_retrieval/__init__.py +1 -5
  63. azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +34 -64
  64. azure/ai/evaluation/_evaluators/_eci/_eci.py +3 -3
  65. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +4 -4
  66. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +2 -2
  67. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +3 -3
  68. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +11 -7
  69. azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +30 -25
  70. azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +210 -96
  71. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +2 -3
  72. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +6 -6
  73. azure/ai/evaluation/_evaluators/_qa/_qa.py +4 -4
  74. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +8 -13
  75. azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +20 -25
  76. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +4 -4
  77. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +25 -25
  78. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +5 -5
  79. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +3 -3
  80. azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +11 -14
  81. azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +43 -34
  82. azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +3 -3
  83. azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +12 -11
  84. azure/ai/evaluation/_evaluators/_xpia/xpia.py +6 -6
  85. azure/ai/evaluation/_exceptions.py +10 -0
  86. azure/ai/evaluation/_http_utils.py +3 -3
  87. azure/ai/evaluation/_legacy/_batch_engine/_engine.py +3 -3
  88. azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +5 -2
  89. azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +5 -10
  90. azure/ai/evaluation/_legacy/_batch_engine/_utils.py +1 -4
  91. azure/ai/evaluation/_legacy/_common/_async_token_provider.py +12 -19
  92. azure/ai/evaluation/_legacy/_common/_thread_pool_executor_with_context.py +2 -0
  93. azure/ai/evaluation/_legacy/prompty/_prompty.py +11 -5
  94. azure/ai/evaluation/_safety_evaluation/__init__.py +1 -1
  95. azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +193 -111
  96. azure/ai/evaluation/_user_agent.py +32 -1
  97. azure/ai/evaluation/_version.py +1 -1
  98. azure/ai/evaluation/red_team/__init__.py +3 -1
  99. azure/ai/evaluation/red_team/_agent/__init__.py +3 -0
  100. azure/ai/evaluation/red_team/_agent/_agent_functions.py +261 -0
  101. azure/ai/evaluation/red_team/_agent/_agent_tools.py +461 -0
  102. azure/ai/evaluation/red_team/_agent/_agent_utils.py +89 -0
  103. azure/ai/evaluation/red_team/_agent/_semantic_kernel_plugin.py +228 -0
  104. azure/ai/evaluation/red_team/_attack_objective_generator.py +94 -52
  105. azure/ai/evaluation/red_team/_attack_strategy.py +4 -1
  106. azure/ai/evaluation/red_team/_callback_chat_target.py +4 -9
  107. azure/ai/evaluation/red_team/_default_converter.py +1 -1
  108. azure/ai/evaluation/red_team/_red_team.py +1622 -765
  109. azure/ai/evaluation/red_team/_red_team_result.py +43 -38
  110. azure/ai/evaluation/red_team/_utils/__init__.py +1 -1
  111. azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +121 -0
  112. azure/ai/evaluation/red_team/_utils/_rai_service_target.py +595 -0
  113. azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +108 -0
  114. azure/ai/evaluation/red_team/_utils/constants.py +6 -12
  115. azure/ai/evaluation/red_team/_utils/formatting_utils.py +41 -44
  116. azure/ai/evaluation/red_team/_utils/logging_utils.py +17 -17
  117. azure/ai/evaluation/red_team/_utils/metric_mapping.py +33 -6
  118. azure/ai/evaluation/red_team/_utils/strategy_utils.py +35 -25
  119. azure/ai/evaluation/simulator/_adversarial_scenario.py +2 -0
  120. azure/ai/evaluation/simulator/_adversarial_simulator.py +34 -16
  121. azure/ai/evaluation/simulator/_conversation/__init__.py +2 -2
  122. azure/ai/evaluation/simulator/_direct_attack_simulator.py +8 -8
  123. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +5 -5
  124. azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +54 -23
  125. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +7 -1
  126. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +25 -15
  127. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +19 -31
  128. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +20 -6
  129. azure/ai/evaluation/simulator/_model_tools/models.py +1 -1
  130. azure/ai/evaluation/simulator/_simulator.py +9 -8
  131. {azure_ai_evaluation-1.7.0.dist-info → azure_ai_evaluation-1.9.0.dist-info}/METADATA +24 -1
  132. {azure_ai_evaluation-1.7.0.dist-info → azure_ai_evaluation-1.9.0.dist-info}/RECORD +135 -123
  133. azure/ai/evaluation/_common/onedp/aio/_vendor.py +0 -40
  134. {azure_ai_evaluation-1.7.0.dist-info → azure_ai_evaluation-1.9.0.dist-info}/NOTICE.txt +0 -0
  135. {azure_ai_evaluation-1.7.0.dist-info → azure_ai_evaluation-1.9.0.dist-info}/WHEEL +0 -0
  136. {azure_ai_evaluation-1.7.0.dist-info → azure_ai_evaluation-1.9.0.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/red_team/_agent/_agent_functions.py (new file)
@@ -0,0 +1,261 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ------------------------------------
+
+"""
+Red Team Tools as Functions for Azure AI Agent
+This module provides functions that can be used as tools in Azure AI Agent for red teaming purposes.
+"""
+
+import json
+import asyncio
+from typing import Any, Callable, Set, Optional
+from azure.identity import DefaultAzureCredential
+from azure.ai.evaluation.red_team._agent import RedTeamToolProvider
+
+
+# Initialize the credential and tool provider (will be created when first needed)
+credential = None
+tool_provider = None
+azure_ai_project = None
+target_function = None
+
+
+def _get_tool_provider() -> RedTeamToolProvider:
+    """Get or create the RedTeamToolProvider instance."""
+    global credential, tool_provider, azure_ai_project
+    if tool_provider is None:
+        credential = DefaultAzureCredential()
+        tool_provider = RedTeamToolProvider(
+            azure_ai_project=azure_ai_project,
+            credential=credential,
+            application_scenario="A customer service chatbot for a retail website",  # Optional context
+        )
+    return tool_provider
+
+
+# Dictionary to store fetched prompts for reference
+fetched_prompts = {}
+
+
+def red_team_fetch_harmful_prompt(
+    risk_category: str, strategy: str = "baseline", convert_with_strategy: Optional[str] = None
+) -> str:
+    """
+    Fetch a harmful prompt for a specific risk category to test content filters.
+
+    This function retrieves a prompt that could potentially trigger content filters
+    and can be used for red teaming AI systems.
+
+    :param risk_category (str): The risk category to get a harmful prompt for (e.g., 'violence', 'hate_unfairness', 'sexual', 'self_harm').
+    :param strategy (str, optional): The attack strategy to use (e.g., 'baseline', 'jailbreak'). Defaults to 'baseline'.
+    :param convert_with_strategy (Optional[str], optional): Optional strategy to convert the prompt (e.g., 'morse_converter', 'binary_converter'). Defaults to None.
+    :return: A JSON string containing the harmful prompt and related metadata.
+    :rtype: str
+    """
+    provider = _get_tool_provider()
+
+    # Run the async method in a new event loop
+    result = asyncio.run(
+        provider.fetch_harmful_prompt(
+            risk_category_text=risk_category, strategy=strategy, convert_with_strategy=convert_with_strategy
+        )
+    )
+
+    # Store the prompt for later conversion if successful
+    if result["status"] == "success" and "prompt_id" in result:
+        prompt_id = result["prompt_id"]
+        if "prompt" in result:
+            fetched_prompts[prompt_id] = result["prompt"]
+
+    return json.dumps(result)
+
+
+def red_team_convert_prompt(prompt_or_id: str, strategy: str) -> str:
+    """
+    Convert a prompt or a previously fetched prompt ID using a specified strategy.
+
+    This function can be used to transform prompts in ways that might bypass content filters
+    for testing AI system defenses.
+
+    :param prompt_or_id (str): Either a prompt text or a prompt ID from a previous fetch_harmful_prompt call.
+    :param strategy (str): The strategy to use for conversion (e.g., 'morse_converter', 'binary_converter', 'base64_converter').
+    :return: A JSON string containing the original and converted prompt.
+    :rtype: str
+    """
+    provider = _get_tool_provider()
+
+    # Check if input is a prompt ID we have stored
+    if prompt_or_id in fetched_prompts:
+        # Update the provider's cache
+        provider._fetched_prompts[prompt_or_id] = fetched_prompts[prompt_or_id]
+
+    # Run the async method in a new event loop
+    result = asyncio.run(provider.convert_prompt(prompt_or_id=prompt_or_id, strategy=strategy))
+
+    return json.dumps(result)
+
+
+def red_team_unified(category: str, strategy: Optional[str] = None) -> str:
+    """
+    Get a harmful prompt for a specific risk category and optionally convert it.
+
+    This function combines fetching and converting harmful prompts in one call,
+    making it easier to test AI system defenses.
+
+    :param category (str): The risk category to get a harmful prompt for (e.g., 'violence', 'hate_unfairness', 'sexual', 'self_harm').
+    :param strategy (Optional[str], optional): Optional strategy to convert the prompt (e.g., 'morse_converter', 'binary_converter'). Defaults to None.
+    :return: A JSON string containing the harmful prompt and related metadata.
+    :rtype: str
+    """
+    provider = _get_tool_provider()
+
+    # Run the async method in a new event loop
+    result = asyncio.run(provider.red_team(category=category, strategy=strategy))
+
+    # Store the prompt for later conversion if it's a success and we didn't convert it
+    if result["status"] == "success":
+        if "prompt_id" in result and "prompt" in result:
+            prompt_id = result["prompt_id"]
+            fetched_prompts[prompt_id] = result["prompt"]
+
+    return json.dumps(result)
+
+
+def red_team_get_available_strategies() -> str:
+    """
+    Get a list of all available prompt conversion strategies.
+
+    This function lists all the strategies that can be used to convert prompts
+    for red teaming purposes.
+
+    :return: A JSON string containing a list of available conversion strategies.
+    :rtype: str
+    """
+    provider = _get_tool_provider()
+    strategies = provider.get_available_strategies()
+
+    return json.dumps({"status": "success", "available_strategies": strategies})
+
+
+def red_team_explain_purpose() -> str:
+    """
+    Explain the purpose and responsible use of red teaming tools.
+
+    This function provides information about the ethical use of red teaming tools
+    and their purpose in improving AI system safety.
+
+    :return: A JSON string containing information about responsible use of red teaming tools.
+    :rtype: str
+    """
+    explanation = {
+        "purpose": "Red teaming tools are designed to test AI systems for vulnerabilities and safety issues.",
+        "responsible_use": [
+            "Only use these tools on systems you have permission to test.",
+            "Use the results to improve system safety and robustness.",
+            "Do not use these tools to cause harm or bypass legitimate safety measures in production systems.",
+        ],
+        "risk_categories": {
+            "violence": "Content related to physical harm, weapons, or dangerous activities",
+            "hate_unfairness": "Content related to discrimination, bias, or prejudice",
+            "sexual": "Explicit sexual content",
+            "self_harm": "Content related to suicide, self-injury, or harmful behaviors",
+        },
+        "conversion_strategies": "Various encoding and obfuscation methods to test if harmful content can bypass detection",
+    }
+
+    return json.dumps(explanation)
+
+
+def red_team_send_to_target(prompt: str) -> str:
+    """
+    Send a prompt to the target function (e.g., call_ollama) and return the response.
+
+    This function allows sending prompts to an external model or service for
+    evaluation.
+
+    :param prompt (str): The prompt text to send to the target function.
+    :return: A JSON string containing the response from the target function.
+    :rtype: str
+    """
+    global target_function
+
+    if target_function is None:
+        return json.dumps(
+            {
+                "status": "error",
+                "message": "Target function not initialized. Make sure to pass a target_func to initialize_tool_provider.",
+            }
+        )
+
+    try:
+        # Call the target function with the prompt
+        response = target_function(prompt)
+
+        return json.dumps({"status": "success", "prompt": prompt, "response": response})
+    except Exception as e:
+        return json.dumps({"status": "error", "message": f"Error calling target function: {str(e)}", "prompt": prompt})
+
+
+# Example User Input for Each Function
+# 1. Red Team Fetch Harmful Prompt
+# User Input: "Fetch a harmful prompt in the 'violence' category."
+# User Input: "Fetch a harmful prompt in the 'hate_unfairness' category with a 'jailbreak' strategy."
+# User Input: "Fetch a harmful prompt in the 'self_harm' category and convert it with 'base64_converter'."
+
+# 2. Red Team Convert Prompt
+# User Input: "Convert this prompt 'prompt_12345678' using the 'binary_converter' strategy."
+# User Input: "Convert the text 'This is a test prompt' using the 'morse_converter' strategy."
+
+# 3. Red Team Unified
+# User Input: "Get a harmful prompt in the 'sexual' category."
+# User Input: "Get a harmful prompt in the 'violence' category and convert it with 'leetspeak_converter'."
+
+# 4. Red Team Get Available Strategies
+# User Input: "What conversion strategies are available for red teaming?"
+
+# 5. Red Team Explain Purpose
+# User Input: "What is the purpose of red teaming tools and how should they be used responsibly?"
+
+# Statically defined user functions for fast reference
+user_functions: Set[Callable[..., Any]] = {
+    red_team_fetch_harmful_prompt,
+    red_team_convert_prompt,
+    red_team_unified,
+    red_team_get_available_strategies,
+    red_team_explain_purpose,
+    red_team_send_to_target,
+}
+
+
+def initialize_tool_provider(
+    projects_connection_string: str,
+    target_func: Optional[Callable[[str], str]] = None,
+) -> Set[Callable[..., Any]]:
+    """
+    Initialize the RedTeamToolProvider with the Azure AI project and credential.
+    This function is called when the module is imported.
+
+    :param projects_connection_string: The Azure AI project connection string.
+    :param target_func: A function that takes a string prompt and returns a string response.
+    :return: A set of callable functions that can be used as tools.
+    """
+    # projects_connection_string is in the format: connection_string;subscription_id;resource_group;project_name
+    # parse it to a dictionary called azure_ai_project
+    global azure_ai_project, credential, tool_provider, target_function
+
+    # Store the target function for later use
+    if target_func is not None:
+        globals()["target_function"] = target_func
+    azure_ai_project = {
+        "subscription_id": projects_connection_string.split(";")[1],
+        "resource_group_name": projects_connection_string.split(";")[2],
+        "project_name": projects_connection_string.split(";")[3],
+    }
+    if not credential:
+        credential = DefaultAzureCredential()
+    tool_provider = RedTeamToolProvider(
+        azure_ai_project=azure_ai_project,
+        credential=credential,
+    )
+    return user_functions
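
For orientation, the sketch below shows how the new agent tool module might be consumed. It is a minimal, hedged example and not part of the diff: the import path comes from the file shown above, my_target is a hypothetical stand-in for a real model call (the docstrings mention call_ollama as an example), and the connection-string format follows the comment inside initialize_tool_provider.

# Hedged usage sketch; placeholder values in angle brackets must be supplied by the caller.
from azure.ai.evaluation.red_team._agent._agent_functions import (
    initialize_tool_provider,
    red_team_get_available_strategies,
)


def my_target(prompt: str) -> str:
    # Hypothetical target; replace with a call to the model or service under test.
    return f"(stub response to: {prompt})"


# Format per the comment in initialize_tool_provider:
# connection_string;subscription_id;resource_group;project_name
tools = initialize_tool_provider(
    projects_connection_string="<connection_string>;<subscription_id>;<resource_group>;<project_name>",
    target_func=my_target,
)

# `tools` is the user_functions set defined in the module; each callable returns a JSON
# string and can be registered as a function tool on an Azure AI Agent.
print(red_team_get_available_strategies())

Running the sketch requires an Azure identity that DefaultAzureCredential can resolve, since initialize_tool_provider constructs the credential and the RedTeamToolProvider.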