azure-ai-evaluation 1.3.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

This version of azure-ai-evaluation has been flagged as potentially problematic.
Files changed (142)
  1. azure/ai/evaluation/__init__.py +27 -1
  2. azure/ai/evaluation/_azure/_models.py +6 -6
  3. azure/ai/evaluation/_common/constants.py +6 -2
  4. azure/ai/evaluation/_common/rai_service.py +39 -5
  5. azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
  6. azure/ai/evaluation/_common/raiclient/_client.py +128 -0
  7. azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
  8. azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
  9. azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
  10. azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
  11. azure/ai/evaluation/_common/raiclient/_version.py +9 -0
  12. azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
  13. azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
  14. azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
  15. azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
  16. azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
  17. azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
  18. azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
  19. azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
  20. azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
  21. azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
  22. azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
  23. azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
  24. azure/ai/evaluation/_common/raiclient/operations/_operations.py +1225 -0
  25. azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
  26. azure/ai/evaluation/_common/raiclient/py.typed +1 -0
  27. azure/ai/evaluation/_common/utils.py +23 -3
  28. azure/ai/evaluation/_constants.py +7 -0
  29. azure/ai/evaluation/_converters/__init__.py +3 -0
  30. azure/ai/evaluation/_converters/_ai_services.py +804 -0
  31. azure/ai/evaluation/_converters/_models.py +302 -0
  32. azure/ai/evaluation/_evaluate/_batch_run/__init__.py +10 -3
  33. azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +104 -0
  34. azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
  35. azure/ai/evaluation/_evaluate/_batch_run/code_client.py +18 -12
  36. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +9 -4
  37. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +42 -22
  38. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +1 -1
  39. azure/ai/evaluation/_evaluate/_eval_run.py +2 -2
  40. azure/ai/evaluation/_evaluate/_evaluate.py +109 -64
  41. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +5 -89
  42. azure/ai/evaluation/_evaluate/_utils.py +3 -3
  43. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +23 -3
  44. azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
  45. azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +120 -0
  46. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +21 -2
  47. azure/ai/evaluation/_evaluators/_common/_base_eval.py +44 -4
  48. azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +4 -2
  49. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +44 -5
  50. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +16 -4
  51. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +42 -5
  52. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +15 -0
  53. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +15 -0
  54. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +15 -0
  55. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +15 -0
  56. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +28 -4
  57. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +21 -2
  58. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +26 -3
  59. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +22 -4
  60. azure/ai/evaluation/_evaluators/_intent_resolution/__init__.py +7 -0
  61. azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +152 -0
  62. azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +161 -0
  63. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +26 -3
  64. azure/ai/evaluation/_evaluators/_qa/_qa.py +51 -7
  65. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +26 -2
  66. azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
  67. azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +158 -0
  68. azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +99 -0
  69. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +21 -2
  70. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +113 -4
  71. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +23 -3
  72. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +24 -5
  73. azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
  74. azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +148 -0
  75. azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +117 -0
  76. azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
  77. azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +292 -0
  78. azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +71 -0
  79. azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
  80. azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +103 -0
  81. azure/ai/evaluation/_evaluators/_xpia/xpia.py +2 -0
  82. azure/ai/evaluation/_exceptions.py +5 -0
  83. azure/ai/evaluation/_legacy/__init__.py +3 -0
  84. azure/ai/evaluation/_legacy/_adapters/__init__.py +21 -0
  85. azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
  86. azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
  87. azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
  88. azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
  89. azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
  90. azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
  91. azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
  92. azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
  93. azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
  94. azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
  95. azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
  96. azure/ai/evaluation/_legacy/_batch_engine/_config.py +45 -0
  97. azure/ai/evaluation/_legacy/_batch_engine/_engine.py +368 -0
  98. azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
  99. azure/ai/evaluation/_legacy/_batch_engine/_logging.py +292 -0
  100. azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +23 -0
  101. azure/ai/evaluation/_legacy/_batch_engine/_result.py +99 -0
  102. azure/ai/evaluation/_legacy/_batch_engine/_run.py +121 -0
  103. azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
  104. azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +217 -0
  105. azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
  106. azure/ai/evaluation/_legacy/_batch_engine/_trace.py +105 -0
  107. azure/ai/evaluation/_legacy/_batch_engine/_utils.py +82 -0
  108. azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
  109. azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
  110. azure/ai/evaluation/_legacy/prompty/_connection.py +182 -0
  111. azure/ai/evaluation/_legacy/prompty/_exceptions.py +59 -0
  112. azure/ai/evaluation/_legacy/prompty/_prompty.py +313 -0
  113. azure/ai/evaluation/_legacy/prompty/_utils.py +545 -0
  114. azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
  115. azure/ai/evaluation/_safety_evaluation/__init__.py +1 -1
  116. azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
  117. azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +251 -150
  118. azure/ai/evaluation/_version.py +1 -1
  119. azure/ai/evaluation/red_team/__init__.py +19 -0
  120. azure/ai/evaluation/red_team/_attack_objective_generator.py +195 -0
  121. azure/ai/evaluation/red_team/_attack_strategy.py +45 -0
  122. azure/ai/evaluation/red_team/_callback_chat_target.py +74 -0
  123. azure/ai/evaluation/red_team/_default_converter.py +21 -0
  124. azure/ai/evaluation/red_team/_red_team.py +1887 -0
  125. azure/ai/evaluation/red_team/_red_team_result.py +382 -0
  126. azure/ai/evaluation/red_team/_utils/__init__.py +3 -0
  127. azure/ai/evaluation/red_team/_utils/constants.py +65 -0
  128. azure/ai/evaluation/red_team/_utils/formatting_utils.py +165 -0
  129. azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
  130. azure/ai/evaluation/red_team/_utils/strategy_utils.py +192 -0
  131. azure/ai/evaluation/simulator/_adversarial_scenario.py +3 -1
  132. azure/ai/evaluation/simulator/_adversarial_simulator.py +54 -27
  133. azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +145 -0
  134. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +71 -1
  135. azure/ai/evaluation/simulator/_simulator.py +1 -1
  136. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/METADATA +80 -15
  137. azure_ai_evaluation-1.5.0.dist-info/RECORD +207 -0
  138. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/WHEEL +1 -1
  139. azure/ai/evaluation/simulator/_tracing.py +0 -89
  140. azure_ai_evaluation-1.3.0.dist-info/RECORD +0 -119
  141. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/NOTICE.txt +0 -0
  142. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_version.py
@@ -3,4 +3,4 @@
  # ---------------------------------------------------------
  # represents upcoming version

- VERSION = "1.3.0"
+ VERSION = "1.5.0"
azure/ai/evaluation/red_team/__init__.py
@@ -0,0 +1,19 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+
+ try:
+     from ._red_team import RedTeam
+     from ._attack_strategy import AttackStrategy
+     from ._attack_objective_generator import RiskCategory
+     from ._red_team_result import RedTeamResult
+ except ImportError:
+     print("[INFO] Could not import Pyrit. Please install the dependency with `pip install azure-ai-evaluation[redteam]`.")
+
+
+ __all__ = [
+     "RedTeam",
+     "AttackStrategy",
+     "RiskCategory",
+     "RedTeamResult",
+ ]
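The try/except guard above makes the red-team surface optional: without PyRIT installed, importing azure.ai.evaluation.red_team prints the [INFO] message instead of raising. A minimal smoke test using only the names listed in __all__ (this sketch assumes the redteam extra named in the message above is installed; no RedTeam constructor arguments are shown, since this hunk does not define them):

# pip install azure-ai-evaluation[redteam]
from azure.ai.evaluation.red_team import AttackStrategy, RedTeam, RedTeamResult, RiskCategory

print(RiskCategory.Violence.value)   # "violence" (defined in _attack_objective_generator.py below)
print(AttackStrategy.Base64.value)   # "base64" (defined in _attack_strategy.py below)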
azure/ai/evaluation/red_team/_attack_objective_generator.py
@@ -0,0 +1,195 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ import json
+ import logging
+ import os
+ from enum import Enum
+ from typing import List, Optional, Dict, Any
+ from pathlib import Path
+ from azure.ai.evaluation._common._experimental import experimental
+
+
+ @experimental
+ class RiskCategory(str, Enum):
+     """Risk categories for attack objectives."""
+     HateUnfairness = "hate_unfairness"
+     Violence = "violence"
+     Sexual = "sexual"
+     SelfHarm = "self_harm"
+
+ class _AttackObjectiveGenerator:
+     """Generator for creating attack objectives.
+
+     :param risk_categories: List of risk categories to generate attack objectives for (optional if custom_attack_seed_prompts is provided)
+     :type risk_categories: Optional[List[RiskCategory]]
+     :param num_objectives: Number of objectives to generate per risk category
+     :type num_objectives: int
+     :param application_scenario: Description of the application scenario for context
+     :type application_scenario: Optional[str]
+     :param custom_attack_seed_prompts: Path to a JSON file containing custom attack seed prompts (can be absolute or relative path)
+     :type custom_attack_seed_prompts: Optional[str]
+     """
+     def __init__(
+         self,
+         risk_categories: Optional[List[RiskCategory]] = None,
+         num_objectives: int = 10,
+         application_scenario: Optional[str] = None,
+         custom_attack_seed_prompts: Optional[str] = None,
+     ):
+         self.risk_categories = risk_categories or []
+         self.num_objectives = num_objectives
+         self.application_scenario = application_scenario
+         self.custom_attack_seed_prompts = custom_attack_seed_prompts
+         self.logger = logging.getLogger("_AttackObjectiveGenerator")
+
+         # If custom_attack_seed_prompts is provided, validate and load them
+         self.custom_prompts = None
+         self.validated_prompts = []
+         self.valid_prompts_by_category = {}
+
+         if custom_attack_seed_prompts:
+             self._load_and_validate_custom_prompts()
+
+     def _load_and_validate_custom_prompts(self) -> None:
+         """Load and validate custom attack seed prompts from the provided file path."""
+         if not self.custom_attack_seed_prompts:
+             return
+
+         # Handle both absolute and relative paths
+         custom_prompts_path = Path(self.custom_attack_seed_prompts)
+
+         # Convert to absolute path if it's a relative path
+         if not custom_prompts_path.is_absolute():
+             self.logger.info(f"Converting relative path '{custom_prompts_path}' to absolute path")
+             custom_prompts_path = Path.cwd() / custom_prompts_path
+
+         self.logger.debug(f"Using absolute path: {custom_prompts_path}")
+
+         # Check if the file exists
+         if not custom_prompts_path.exists():
+             raise ValueError(f"Custom attack seed prompts file not found: {custom_prompts_path}")
+
+         try:
+             # Load JSON file
+             with open(custom_prompts_path, 'r', encoding='utf-8') as f:
+                 self.custom_prompts = json.load(f)
+
+             # Validate that it's a list
+             if not isinstance(self.custom_prompts, list):
+                 raise ValueError(f"Custom attack seed prompts must be a JSON array, got {type(self.custom_prompts)}, see https://aka.ms/airedteamingagent-howtodoc for more information")
+
+             self.logger.info(f"Loaded {len(self.custom_prompts)} prompts from {self.custom_attack_seed_prompts}")
+
+             # Initialize dictionary for categorized prompts
+             for risk_category in RiskCategory:
+                 self.valid_prompts_by_category[risk_category.value] = []
+
+             # Process each prompt and validate format
+             valid_prompts_count = 0
+             invalid_prompts_count = 0
+             for i, prompt in enumerate(self.custom_prompts):
+                 try:
+                     # Check required fields
+                     if not isinstance(prompt, dict):
+                         self.logger.warning(f"Skipping prompt {i}: not a JSON object")
+                         continue
+
+                     if "metadata" not in prompt:
+                         self.logger.warning(f"Skipping prompt {i}: missing 'metadata' field")
+                         continue
+
+                     if "messages" not in prompt or not prompt["messages"]:
+                         self.logger.warning(f"Skipping prompt {i}: missing or empty 'messages' field")
+                         continue
+
+                     # Check metadata structure
+                     metadata = prompt["metadata"]
+                     if not isinstance(metadata, dict):
+                         self.logger.warning(f"Skipping prompt {i}: 'metadata' is not a JSON object, see https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     if "target_harms" not in metadata or not metadata["target_harms"]:
+                         self.logger.warning(f"Skipping prompt {i}: missing or empty 'target_harms' in metadata, see https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     # Check target_harms structure
+                     valid_risk_types = {cat.value for cat in RiskCategory}
+                     valid_risk_found = False
+                     prompt_categories = []
+
+                     for harm in metadata["target_harms"]:
+                         if not isinstance(harm, dict):
+                             self.logger.warning(f"Skipping harm in prompt {i}: not a JSON object, see https://aka.ms/airedteamingagent-howtodoc for more information")
+                             continue
+
+                         if "risk-type" not in harm:
+                             self.logger.warning(f"Skipping harm in prompt {i}: missing 'risk-type' field, see https://aka.ms/airedteamingagent-howtodoc for more information")
+                             continue
+
+                         risk_type = harm.get("risk-type", "")
+                         if risk_type not in valid_risk_types:
+                             self.logger.warning(f"Skipping harm in prompt {i}: invalid risk-type '{risk_type}'. Valid types: {valid_risk_types}. see https://aka.ms/airedteamingagent-howtodoc for more information")
+                             continue
+
+                         prompt_categories.append(risk_type)
+                         valid_risk_found = True
+
+                     if not valid_risk_found:
+                         self.logger.warning(f"Skipping prompt {i}: no valid risk types found. See https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     # Check messages structure
+                     messages = prompt["messages"]
+                     if not isinstance(messages, list) or not messages:
+                         self.logger.warning(f"Skipping prompt {i}: 'messages' is not a list or is empty, see https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     message = messages[0]
+                     if not isinstance(message, dict):
+                         self.logger.warning(f"Skipping prompt {i}: first message is not a JSON object, see https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     if "role" not in message or message["role"] != "user":
+                         self.logger.warning(f"Skipping prompt {i}: first message must have role='user', see https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     if "content" not in message or not message["content"]:
+                         self.logger.warning(f"Skipping prompt {i}: first message missing or empty 'content', see https://aka.ms/airedteamingagent-howtodoc for more information")
+                         continue
+
+                     # If we got here, the prompt is valid
+                     self.validated_prompts.append(prompt)
+                     valid_prompts_count += 1
+
+                     # Add to the appropriate categories
+                     for category in prompt_categories:
+                         self.valid_prompts_by_category[category].append(prompt)
+
+                 except Exception as e:
+                     self.logger.warning(f"Error validating prompt {i}: {str(e)}")
+                     invalid_prompts_count += 1
+
+             # Check if we have at least one valid prompt
+             if valid_prompts_count == 0:
+                 raise ValueError("No valid prompts found in custom attack seed prompts file. See https://aka.ms/airedteamingagent-howtodoc for more information")
+
+             self.logger.info(f"Loaded {valid_prompts_count} valid prompts from custom attack seed prompts file")
+
+             if invalid_prompts_count > 0:
+                 self.logger.warning(f"Skipped {invalid_prompts_count} invalid prompts")
+
+             # Log the breakdown by risk category
+             category_counts = {cat: len(prompts) for cat, prompts in self.valid_prompts_by_category.items() if len(prompts) > 0}
+             self.logger.info(f"Prompt distribution by risk category: {category_counts}")
+
+             # Automatically extract risk categories from valid prompts if not provided
+             if not self.risk_categories:
+                 categories_with_prompts = [cat for cat, prompts in self.valid_prompts_by_category.items() if prompts]
+                 self.risk_categories = [RiskCategory(cat) for cat in categories_with_prompts]
+                 self.logger.info(f"Automatically set risk categories based on valid prompts: {[cat.value for cat in self.risk_categories]}")
+
+         except json.JSONDecodeError as e:
+             raise ValueError(f"Failed to parse custom attack seed prompts file: {str(e)}. See https://aka.ms/airedteamingagent-howtodoc for more information")
+         except Exception as e:
+             raise ValueError(f"Error loading custom attack seed prompts: {str(e)}. See https://aka.ms/airedteamingagent-howtodoc for more information")
azure/ai/evaluation/red_team/_attack_strategy.py
@@ -0,0 +1,45 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ from enum import Enum
+ from typing import List
+ from azure.ai.evaluation._common._experimental import experimental
+
+
+ @experimental
+ class AttackStrategy(Enum):
+     """Strategies for attacks."""
+     EASY = "easy"
+     MODERATE = "moderate"
+     DIFFICULT = "difficult"
+     AnsiAttack = "ansi_attack"
+     AsciiArt = "ascii_art"
+     AsciiSmuggler = "ascii_smuggler"
+     Atbash = "atbash"
+     Base64 = "base64"
+     Binary = "binary"
+     Caesar = "caesar"
+     CharacterSpace = "character_space"
+     CharSwap = "char_swap"
+     Diacritic = "diacritic"
+     Flip = "flip"
+     Leetspeak = "leetspeak"
+     Morse = "morse"
+     ROT13 = "rot13"
+     SuffixAppend = "suffix_append"
+     StringJoin = "string_join"
+     Tense = "tense"
+     UnicodeConfusable = "unicode_confusable"
+     UnicodeSubstitution = "unicode_substitution"
+     Url = "url"
+     Baseline = "baseline"
+     Jailbreak = "jailbreak"
+
+     @classmethod
+     def Compose(cls, items: List["AttackStrategy"]) -> List["AttackStrategy"]:
+         for item in items:
+             if not isinstance(item, cls):
+                 raise ValueError("All items must be instances of AttackStrategy")
+         if len(items) > 2:
+             raise ValueError("Composed strategies must have at most 2 items")
+         return items
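Compose does not merge strategies into a new value; it only validates and returns the list, enforcing that every element is an AttackStrategy and that at most two are combined. A quick illustration:

from azure.ai.evaluation.red_team import AttackStrategy

pair = AttackStrategy.Compose([AttackStrategy.Base64, AttackStrategy.ROT13])
print(pair)  # the same two-item list, returned unchanged

try:
    AttackStrategy.Compose([AttackStrategy.Base64, AttackStrategy.ROT13, AttackStrategy.Url])
except ValueError as e:
    print(e)  # Composed strategies must have at most 2 items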
azure/ai/evaluation/red_team/_callback_chat_target.py
@@ -0,0 +1,74 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ import logging
+ from typing import Any, Callable, Dict, List, Optional
+
+ from pyrit.models import (
+     PromptRequestResponse,
+     construct_response_from_request,
+ )
+ from pyrit.prompt_target import PromptChatTarget
+
+ logger = logging.getLogger(__name__)
+
+
+ class _CallbackChatTarget(PromptChatTarget):
+     def __init__(
+         self,
+         *,
+         callback: Callable[[List[Dict], bool, Optional[str], Optional[Dict[str, Any]]], Dict],
+         stream: bool = False,
+     ) -> None:
+         """
+         Initializes an instance of the _CallbackChatTarget class.
+
+         It is intended to be used with PyRIT where users define a callback function
+         that handles sending a prompt to a target and receiving a response.
+         The _CallbackChatTarget class is a wrapper around the callback function that allows it to be used
+         as a target in the PyRIT framework.
+         For that reason, it merely handles additional functionality such as memory.
+
+         Args:
+             callback (Callable): The callback function that sends a prompt to a target and receives a response.
+             stream (bool, optional): Indicates whether the target supports streaming. Defaults to False.
+         """
+         PromptChatTarget.__init__(self)
+         self._callback = callback
+         self._stream = stream
+
+     async def send_prompt_async(self, *, prompt_request: PromptRequestResponse) -> PromptRequestResponse:
+
+         self._validate_request(prompt_request=prompt_request)
+         request = prompt_request.request_pieces[0]
+
+         messages = self._memory.get_chat_messages_with_conversation_id(conversation_id=request.conversation_id)
+
+         messages.append(request.to_chat_message())
+
+         logger.info(f"Sending the following prompt to the prompt target: {request}")
+
+         # response_context contains "messages", "stream", "session_state, "context"
+         response_context = await self._callback(messages=messages, stream=self._stream, session_state=None, context=None)  # type: ignore
+
+         response_text = response_context["messages"][-1]["content"]
+         response_entry = construct_response_from_request(
+             request=request, response_text_pieces=[response_text]
+         )
+
+         logger.info(
+             "Received the following response from the prompt target"
+             + f"{response_text}"
+         )
+         return response_entry
+
+     def _validate_request(self, *, prompt_request: PromptRequestResponse) -> None:
+         if len(prompt_request.request_pieces) != 1:
+             raise ValueError("This target only supports a single prompt request piece.")
+
+         if prompt_request.request_pieces[0].converted_value_data_type != "text":
+             raise ValueError("This target only supports text prompt input.")
+
+     def is_json_response_supported(self) -> bool:
+         """Indicates that this target supports JSON response format."""
+         return False
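send_prompt_async defines the callback contract: the callback is awaited with keyword arguments messages, stream, session_state, and context, and must return a dict whose "messages" list ends with an entry exposing ["content"]. A minimal sketch of a conforming callback (the fixed reply string is a placeholder for a real call into the application under test):

from azure.ai.evaluation.red_team._callback_chat_target import _CallbackChatTarget

async def my_callback(messages, stream=False, session_state=None, context=None) -> dict:
    # Call the application under test here; this placeholder always answers the same way.
    reply = {"role": "assistant", "content": "<application response>"}
    # _CallbackChatTarget only reads response_context["messages"][-1]["content"]
    return {"messages": [reply], "stream": stream, "session_state": session_state, "context": context}

target = _CallbackChatTarget(callback=my_callback)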
azure/ai/evaluation/red_team/_default_converter.py
@@ -0,0 +1,21 @@
+ from pyrit.models import PromptDataType
+ from pyrit.prompt_converter import ConverterResult, PromptConverter
+
+
+ class _DefaultConverter(PromptConverter):
+
+     async def convert_async(self, *, prompt: str, input_type: PromptDataType = "text") -> ConverterResult:
+         """
+         Simple converter that does nothing to the prompt and returns it as is.
+         """
+         if not self.input_supported(input_type):
+             raise ValueError("Input type not supported")
+
+         result = ConverterResult(output_text=prompt, output_type="text")
+         return result
+
+     def input_supported(self, input_type: PromptDataType) -> bool:
+         return input_type == "text"
+
+     def output_supported(self, output_type: PromptDataType) -> bool:
+         return output_type == "text"
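_DefaultConverter is an identity converter: it validates that the input is text and hands the prompt back unchanged, presumably for strategies that apply no transformation. A quick check, assuming PyRIT is installed:

import asyncio
from azure.ai.evaluation.red_team._default_converter import _DefaultConverter

async def demo():
    result = await _DefaultConverter().convert_async(prompt="hello", input_type="text")
    print(result.output_text, result.output_type)  # hello text

asyncio.run(demo())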