azure-ai-evaluation 1.0.0b3__py3-none-any.whl → 1.0.0b5__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.

This version of azure-ai-evaluation has been flagged as a potentially problematic release.

Files changed (93)
  1. azure/ai/evaluation/__init__.py +23 -1
  2. azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +20 -9
  3. azure/ai/evaluation/_common/constants.py +9 -2
  4. azure/ai/evaluation/_common/math.py +29 -0
  5. azure/ai/evaluation/_common/rai_service.py +222 -93
  6. azure/ai/evaluation/_common/utils.py +328 -19
  7. azure/ai/evaluation/_constants.py +16 -8
  8. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/__init__.py +3 -2
  9. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +33 -17
  10. azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +14 -7
  11. azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/proxy_client.py +22 -4
  12. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +35 -0
  13. azure/ai/evaluation/_evaluate/_eval_run.py +47 -14
  14. azure/ai/evaluation/_evaluate/_evaluate.py +370 -188
  15. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +15 -16
  16. azure/ai/evaluation/_evaluate/_utils.py +77 -25
  17. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +1 -1
  18. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +16 -10
  19. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -34
  20. azure/ai/evaluation/_evaluators/_common/_base_eval.py +76 -46
  21. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +26 -19
  22. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +62 -25
  23. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +68 -36
  24. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +67 -46
  25. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +33 -4
  26. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +33 -4
  27. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +33 -4
  28. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +33 -4
  29. azure/ai/evaluation/_evaluators/_eci/_eci.py +7 -5
  30. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +14 -6
  31. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +22 -21
  32. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -36
  33. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +1 -1
  34. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +51 -16
  35. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +113 -0
  36. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +99 -0
  37. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +3 -7
  38. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +20 -0
  39. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +130 -0
  40. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +57 -0
  41. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +96 -0
  42. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +120 -0
  43. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +96 -0
  44. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +96 -0
  45. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +96 -0
  46. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +46 -13
  47. azure/ai/evaluation/_evaluators/_qa/_qa.py +11 -6
  48. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +23 -20
  49. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +78 -42
  50. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +126 -80
  51. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +74 -24
  52. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +2 -2
  53. azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
  54. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +150 -0
  55. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +32 -15
  56. azure/ai/evaluation/_evaluators/_xpia/xpia.py +36 -10
  57. azure/ai/evaluation/_exceptions.py +26 -6
  58. azure/ai/evaluation/_http_utils.py +203 -132
  59. azure/ai/evaluation/_model_configurations.py +23 -6
  60. azure/ai/evaluation/_vendor/__init__.py +3 -0
  61. azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
  62. azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +328 -0
  63. azure/ai/evaluation/_vendor/rouge_score/scoring.py +63 -0
  64. azure/ai/evaluation/_vendor/rouge_score/tokenize.py +63 -0
  65. azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
  66. azure/ai/evaluation/_version.py +1 -1
  67. azure/ai/evaluation/simulator/__init__.py +2 -1
  68. azure/ai/evaluation/simulator/_adversarial_scenario.py +5 -0
  69. azure/ai/evaluation/simulator/_adversarial_simulator.py +88 -60
  70. azure/ai/evaluation/simulator/_conversation/__init__.py +13 -12
  71. azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -4
  72. azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
  73. azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
  74. azure/ai/evaluation/simulator/_direct_attack_simulator.py +24 -66
  75. azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
  76. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +26 -5
  77. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +98 -95
  78. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +67 -21
  79. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +28 -11
  80. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +68 -24
  81. azure/ai/evaluation/simulator/_model_tools/models.py +10 -10
  82. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +4 -9
  83. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -5
  84. azure/ai/evaluation/simulator/_simulator.py +222 -169
  85. azure/ai/evaluation/simulator/_tracing.py +4 -4
  86. azure/ai/evaluation/simulator/_utils.py +6 -6
  87. {azure_ai_evaluation-1.0.0b3.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/METADATA +237 -52
  88. azure_ai_evaluation-1.0.0b5.dist-info/NOTICE.txt +70 -0
  89. azure_ai_evaluation-1.0.0b5.dist-info/RECORD +120 -0
  90. {azure_ai_evaluation-1.0.0b3.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/WHEEL +1 -1
  91. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -49
  92. azure_ai_evaluation-1.0.0b3.dist-info/RECORD +0 -98
  93. {azure_ai_evaluation-1.0.0b3.dist-info → azure_ai_evaluation-1.0.0b5.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py (new file)
@@ -0,0 +1,328 @@
+ # coding=utf-8
+ # Copyright 2024 The Google Research Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Portions Copyright (c) Microsoft Corporation
+
+ """Computes rouge scores between two text blobs.
+
+ Implementation replicates the functionality in the original ROUGE package. See:
+
+ Lin, Chin-Yew. ROUGE: a Package for Automatic Evaluation of Summaries. In
+ Proceedings of the Workshop on Text Summarization Branches Out (WAS 2004),
+ Barcelona, Spain, July 25 - 26, 2004.
+
+ Default options are equivalent to running:
+ ROUGE-1.5.5.pl -e data -n 2 -a settings.xml
+
+ Or with use_stemmer=True:
+ ROUGE-1.5.5.pl -m -e data -n 2 -a settings.xml
+
+ In these examples settings.xml lists input files and formats.
+ """
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import collections
+ import re
+
+ from azure.ai.evaluation._vendor.rouge_score import scoring
+ from azure.ai.evaluation._vendor.rouge_score import tokenizers
+
+ import nltk
+
+
+ class RougeScorer(scoring.BaseScorer):
+     """Calculate rouges scores between two blobs of text.
+
+     Sample usage:
+         scorer = RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
+         scores = scorer.score('The quick brown fox jumps over the lazy dog',
+                               'The quick brown dog jumps on the log.')
+     """
+
+     def __init__(self, rouge_types, use_stemmer=False, split_summaries=False, tokenizer=None):
+         """Initializes a new RougeScorer.
+
+         Valid rouge types that can be computed are:
+             rougen (e.g. rouge1, rouge2): n-gram based scoring.
+             rougeL: Longest common subsequence based scoring.
+
+         Args:
+             rouge_types: A list of rouge types to calculate.
+             use_stemmer: Bool indicating whether Porter stemmer should be used to
+                 strip word suffixes to improve matching. This arg is used in the
+                 DefaultTokenizer, but other tokenizers might or might not choose to
+                 use this.
+             split_summaries: whether to add newlines between sentences for rougeLsum
+             tokenizer: Tokenizer object which has a tokenize() method.
+         Returns:
+             A dict mapping rouge types to Score tuples.
+         """
+
+         self.rouge_types = rouge_types
+         if tokenizer:
+             self._tokenizer = tokenizer
+         else:
+             self._tokenizer = tokenizers.DefaultTokenizer(use_stemmer)
+
+         self._split_summaries = split_summaries
+
+     def score_multi(self, targets, prediction):
+         """Calculates rouge scores between targets and prediction.
+
+         The target with the maximum f-measure is used for the final score for
+         each score type..
+
+         Args:
+             targets: list of texts containing the targets
+             prediction: Text containing the predicted text.
+         Returns:
+             A dict mapping each rouge type to a Score object.
+         Raises:
+             ValueError: If an invalid rouge type is encountered.
+         """
+         score_dicts = [self.score(t, prediction) for t in targets]
+         max_score = {}
+         for k in self.rouge_types:
+             fmeasures = [s[k].fmeasure for s in score_dicts]
+             index = fmeasures.index(max(fmeasures))
+             max_score[k] = score_dicts[index][k]
+
+         return max_score
+
+     def score(self, target, prediction):
+         """Calculates rouge scores between the target and prediction.
+
+         Args:
+             target: Text containing the target (ground truth) text,
+                 or if a list
+             prediction: Text containing the predicted text.
+         Returns:
+             A dict mapping each rouge type to a Score object.
+         Raises:
+             ValueError: If an invalid rouge type is encountered.
+         """
+
+         # Pre-compute target tokens and prediction tokens for use by different
+         # types, except if only "rougeLsum" is requested.
+         if len(self.rouge_types) == 1 and self.rouge_types[0] == "rougeLsum":
+             target_tokens = None
+             prediction_tokens = None
+         else:
+             target_tokens = self._tokenizer.tokenize(target)
+             prediction_tokens = self._tokenizer.tokenize(prediction)
+         result = {}
+
+         for rouge_type in self.rouge_types:
+             if rouge_type == "rougeL":
+                 # Rouge from longest common subsequences.
+                 scores = _score_lcs(target_tokens, prediction_tokens)
+             elif rouge_type == "rougeLsum":
+                 # Note: Does not support multi-line text.
+                 def get_sents(text):
+                     if self._split_summaries:
+                         sents = nltk.sent_tokenize(text)
+                     else:
+                         # Assume sentences are separated by newline.
+                         sents = text.split("\n")
+                     sents = [x for x in sents if len(x)]
+                     return sents
+
+                 target_tokens_list = [self._tokenizer.tokenize(s) for s in get_sents(target)]
+                 prediction_tokens_list = [self._tokenizer.tokenize(s) for s in get_sents(prediction)]
+
+                 scores = _summary_level_lcs(target_tokens_list, prediction_tokens_list)
+             elif re.match(r"rouge[0-9]$", rouge_type):
+                 # Rouge from n-grams.
+                 n = int(rouge_type[5:])
+                 if n <= 0:
+                     raise ValueError("rougen requires positive n: %s" % rouge_type)
+                 target_ngrams = _create_ngrams(target_tokens, n)
+                 prediction_ngrams = _create_ngrams(prediction_tokens, n)
+                 scores = _score_ngrams(target_ngrams, prediction_ngrams)
+             else:
+                 raise ValueError("Invalid rouge type: %s" % rouge_type)
+             result[rouge_type] = scores
+
+         return result
+
+
+ def _create_ngrams(tokens, n):
+     """Creates ngrams from the given list of tokens.
+
+     Args:
+         tokens: A list of tokens from which ngrams are created.
+         n: Number of tokens to use, e.g. 2 for bigrams.
+     Returns:
+         A dictionary mapping each bigram to the number of occurrences.
+     """
+
+     ngrams = collections.Counter()
+     for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)):
+         ngrams[ngram] += 1
+     return ngrams
+
+
+ def _score_lcs(target_tokens, prediction_tokens):
+     """Computes LCS (Longest Common Subsequence) rouge scores.
+
+     Args:
+         target_tokens: Tokens from the target text.
+         prediction_tokens: Tokens from the predicted text.
+     Returns:
+         A Score object containing computed scores.
+     """
+
+     if not target_tokens or not prediction_tokens:
+         return scoring.Score(precision=0, recall=0, fmeasure=0)
+
+     # Compute length of LCS from the bottom up in a table (DP appproach).
+     lcs_table = _lcs_table(target_tokens, prediction_tokens)
+     lcs_length = lcs_table[-1][-1]
+
+     precision = lcs_length / len(prediction_tokens)
+     recall = lcs_length / len(target_tokens)
+     fmeasure = scoring.fmeasure(precision, recall)
+
+     return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
+
+
+ def _lcs_table(ref, can):
+     """Create 2-d LCS score table."""
+     rows = len(ref)
+     cols = len(can)
+     lcs_table = [[0] * (cols + 1) for _ in range(rows + 1)]
+     for i in range(1, rows + 1):
+         for j in range(1, cols + 1):
+             if ref[i - 1] == can[j - 1]:
+                 lcs_table[i][j] = lcs_table[i - 1][j - 1] + 1
+             else:
+                 lcs_table[i][j] = max(lcs_table[i - 1][j], lcs_table[i][j - 1])
+     return lcs_table
+
+
+ def _backtrack_norec(t, ref, can):
+     """Read out LCS."""
+     i = len(ref)
+     j = len(can)
+     lcs = []
+     while i > 0 and j > 0:
+         if ref[i - 1] == can[j - 1]:
+             lcs.insert(0, i - 1)
+             i -= 1
+             j -= 1
+         elif t[i][j - 1] > t[i - 1][j]:
+             j -= 1
+         else:
+             i -= 1
+     return lcs
+
+
+ def _summary_level_lcs(ref_sent, can_sent):
+     """ROUGE: Summary-level LCS, section 3.2 in ROUGE paper.
+
+     Args:
+         ref_sent: list of tokenized reference sentences
+         can_sent: list of tokenized candidate sentences
+
+     Returns:
+         summary level ROUGE score
+     """
+     if not ref_sent or not can_sent:
+         return scoring.Score(precision=0, recall=0, fmeasure=0)
+
+     m = sum(map(len, ref_sent))
+     n = sum(map(len, can_sent))
+     if not n or not m:
+         return scoring.Score(precision=0, recall=0, fmeasure=0)
+
+     # get token counts to prevent double counting
+     token_cnts_r = collections.Counter()
+     token_cnts_c = collections.Counter()
+     for s in ref_sent:
+         # s is a list of tokens
+         token_cnts_r.update(s)
+     for s in can_sent:
+         token_cnts_c.update(s)
+
+     hits = 0
+     for r in ref_sent:
+         lcs = _union_lcs(r, can_sent)
+         # Prevent double-counting:
+         # The paper describes just computing hits += len(_union_lcs()),
+         # but the implementation prevents double counting. We also
+         # implement this as in version 1.5.5.
+         for t in lcs:
+             if token_cnts_c[t] > 0 and token_cnts_r[t] > 0:
+                 hits += 1
+                 token_cnts_c[t] -= 1
+                 token_cnts_r[t] -= 1
+
+     recall = hits / m
+     precision = hits / n
+     fmeasure = scoring.fmeasure(precision, recall)
+     return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
+
+
+ def _union_lcs(ref, c_list):
+     """Find union LCS between a ref sentence and list of candidate sentences.
+
+     Args:
+         ref: list of tokens
+         c_list: list of list of indices for LCS into reference summary
+
+     Returns:
+         List of tokens in ref representing union LCS.
+     """
+     lcs_list = [lcs_ind(ref, c) for c in c_list]
+     return [ref[i] for i in _find_union(lcs_list)]
+
+
+ def _find_union(lcs_list):
+     """Finds union LCS given a list of LCS."""
+     return sorted(list(set().union(*lcs_list)))
+
+
+ def lcs_ind(ref, can):
+     """Returns one of the longest lcs."""
+     t = _lcs_table(ref, can)
+     return _backtrack_norec(t, ref, can)
+
+
+ def _score_ngrams(target_ngrams, prediction_ngrams):
+     """Compute n-gram based rouge scores.
+
+     Args:
+         target_ngrams: A Counter object mapping each ngram to number of
+             occurrences for the target text.
+         prediction_ngrams: A Counter object mapping each ngram to number of
+             occurrences for the prediction text.
+     Returns:
+         A Score object containing computed scores.
+     """
+
+     intersection_ngrams_count = 0
+     for ngram in target_ngrams.keys():
+         intersection_ngrams_count += min(target_ngrams[ngram], prediction_ngrams[ngram])
+     target_ngrams_count = sum(target_ngrams.values())
+     prediction_ngrams_count = sum(prediction_ngrams.values())
+
+     precision = intersection_ngrams_count / max(prediction_ngrams_count, 1)
+     recall = intersection_ngrams_count / max(target_ngrams_count, 1)
+     fmeasure = scoring.fmeasure(precision, recall)
+
+     return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
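
The vendored scorer keeps the public surface of Google's rouge_score package: construct a RougeScorer with the desired rouge types, then call score(target, prediction) to get a dict of Score tuples. A minimal sketch against the vendored module path added above, mirroring the "Sample usage" in the class docstring (assumes the wheel and its nltk dependency are installed):

    from azure.ai.evaluation._vendor.rouge_score.rouge_scorer import RougeScorer

    scorer = RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=True)
    scores = scorer.score(
        "The quick brown fox jumps over the lazy dog",  # target (reference) text
        "The quick brown dog jumps on the log.",        # prediction text
    )
    for rouge_type, score in scores.items():
        # Each value is a scoring.Score namedtuple: precision, recall, fmeasure.
        print(rouge_type, score.precision, score.recall, score.fmeasure)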
azure/ai/evaluation/_vendor/rouge_score/scoring.py (new file)
@@ -0,0 +1,63 @@
+ # coding=utf-8
+ # Copyright 2024 The Google Research Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Portions Copyright (c) Microsoft Corporation
+
+ """Library for scoring and evaluation of text samples.
+
+ Aggregation functions use bootstrap resampling to compute confidence intervals
+ as per the original ROUGE perl implementation.
+ """
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import abc
+ import collections
+ from typing import Dict
+
+
+ class Score(collections.namedtuple("Score", ["precision", "recall", "fmeasure"])):
+     """Tuple containing precision, recall, and f-measure values."""
+
+
+ class BaseScorer(object, metaclass=abc.ABCMeta):
+     """Base class for Scorer objects."""
+
+     @abc.abstractmethod
+     def score(self, target, prediction):
+         """Calculates score between the target and prediction.
+
+         Args:
+             target: Text containing the target (ground truth) text.
+             prediction: Text containing the predicted text.
+
+         Returns:
+             A dict mapping each score_type (string) to Score object.
+         """
+
+
+ class AggregateScore(collections.namedtuple("AggregateScore", ["low", "mid", "high"])):
+     """Tuple containing confidence intervals for scores."""
+
+
+ def fmeasure(precision, recall):
+     """Computes f-measure given precision and recall values."""
+
+     if precision + recall > 0:
+         return 2 * precision * recall / (precision + recall)
+     else:
+         return 0.0
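
scoring.fmeasure is the harmonic mean of precision and recall, guarded so that 0/0 returns 0.0, and Score/AggregateScore are plain namedtuples. A quick worked check of the formula used throughout the scorer:

    from azure.ai.evaluation._vendor.rouge_score import scoring

    # F1 = 2 * P * R / (P + R): 2 * 0.75 * 0.5 / 1.25 = 0.6
    print(scoring.fmeasure(0.75, 0.5))  # 0.6
    print(scoring.fmeasure(0.0, 0.0))   # 0.0 (guarded division)

    # Score is a namedtuple, so fields are accessible by name.
    s = scoring.Score(precision=0.75, recall=0.5, fmeasure=0.6)
    print(s.fmeasure)                   # 0.6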
azure/ai/evaluation/_vendor/rouge_score/tokenize.py (new file)
@@ -0,0 +1,63 @@
+ # coding=utf-8
+ # Copyright 2024 The Google Research Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Portions Copyright (c) Microsoft Corporation
+
+ """A library for tokenizing text."""
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import re
+
+
+ # Pre-compile regexes that are use often
+ NON_ALPHANUM_PATTERN = r"[^a-z0-9]+"
+ NON_ALPHANUM_RE = re.compile(NON_ALPHANUM_PATTERN)
+ SPACES_PATTERN = r"\s+"
+ SPACES_RE = re.compile(SPACES_PATTERN)
+ VALID_TOKEN_PATTERN = r"^[a-z0-9]+$"
+ VALID_TOKEN_RE = re.compile(VALID_TOKEN_PATTERN)
+
+
+ def tokenize(text, stemmer):
+     """Tokenize input text into a list of tokens.
+
+     This approach aims to replicate the approach taken by Chin-Yew Lin in
+     the original ROUGE implementation.
+
+     Args:
+         text: A text blob to tokenize.
+         stemmer: An optional stemmer.
+
+     Returns:
+         A list of string tokens extracted from input text.
+     """
+
+     # Convert everything to lowercase.
+     text = text.lower()
+     # Replace any non-alpha-numeric characters with spaces.
+     text = NON_ALPHANUM_RE.sub(" ", text)
+
+     tokens = SPACES_RE.split(text)
+     if stemmer:
+         # Only stem words more than 3 characters long.
+         tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens]
+
+     # One final check to drop any empty or invalid tokens.
+     tokens = [x for x in tokens if VALID_TOKEN_RE.match(x)]
+
+     return tokens
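
tokenize() lowercases the text, treats every non-alphanumeric character as a separator, optionally Porter-stems tokens longer than three characters, and drops anything that is not purely [a-z0-9]. A small illustrative sketch of that behavior:

    from nltk.stem import porter
    from azure.ai.evaluation._vendor.rouge_score import tokenize

    # Punctuation and casing are normalized away; digits are kept.
    print(tokenize.tokenize("The Quick, Brown Foxes (2004)!", stemmer=None))
    # -> ['the', 'quick', 'brown', 'foxes', '2004']

    # With a stemmer, tokens longer than 3 characters are additionally stemmed
    # (e.g. "foxes" becomes "fox"); 3-character tokens like "the" are left alone.
    print(tokenize.tokenize("The Quick, Brown Foxes (2004)!", stemmer=porter.PorterStemmer()))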
azure/ai/evaluation/_vendor/rouge_score/tokenizers.py (new file)
@@ -0,0 +1,53 @@
+ # coding=utf-8
+ # Copyright 2024 The Google Research Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Library containing Tokenizer definitions.
+
+ The RougeScorer class can be instantiated with the tokenizers defined here. New
+ tokenizers can be defined by creating a subclass of the Tokenizer abstract class
+ and overriding the tokenize() method.
+ """
+ import abc
+
+ from nltk.stem import porter
+
+ from azure.ai.evaluation._vendor.rouge_score import tokenize
+
+
+ class Tokenizer(abc.ABC):
+     """Abstract base class for a tokenizer.
+
+     Subclasses of Tokenizer must implement the tokenize() method.
+     """
+
+     @abc.abstractmethod
+     def tokenize(self, text):
+         raise NotImplementedError("Tokenizer must override tokenize() method")
+
+
+ class DefaultTokenizer(Tokenizer):
+     """Default tokenizer which tokenizes on whitespace."""
+
+     def __init__(self, use_stemmer=False):
+         """Constructor for DefaultTokenizer.
+
+         Args:
+             use_stemmer: boolean, indicating whether Porter stemmer should be used to
+                 strip word suffixes to improve matching.
+         """
+         self._stemmer = porter.PorterStemmer() if use_stemmer else None
+
+     def tokenize(self, text):
+         return tokenize.tokenize(text, self._stemmer)
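
As the module docstring notes, RougeScorer accepts any object exposing a tokenize() method, so DefaultTokenizer can be swapped for a custom subclass. A minimal sketch of that extension point (the WhitespaceTokenizer name below is hypothetical, not part of the package):

    from azure.ai.evaluation._vendor.rouge_score.rouge_scorer import RougeScorer
    from azure.ai.evaluation._vendor.rouge_score.tokenizers import Tokenizer

    class WhitespaceTokenizer(Tokenizer):
        """Hypothetical tokenizer: split on whitespace only, keep punctuation."""

        def tokenize(self, text):
            return text.lower().split()

    default_scorer = RougeScorer(["rouge1"], use_stemmer=True)           # uses DefaultTokenizer internally
    custom_scorer = RougeScorer(["rouge1"], tokenizer=WhitespaceTokenizer())
    print(custom_scorer.score("the fox jumped.", "the fox jumped."))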
azure/ai/evaluation/_version.py
@@ -2,4 +2,4 @@
  # Copyright (c) Microsoft Corporation. All rights reserved.
  # ---------------------------------------------------------

- VERSION = "1.0.0b3"
+ VERSION = "1.0.0b5"
azure/ai/evaluation/simulator/__init__.py
@@ -1,4 +1,4 @@
- from ._adversarial_scenario import AdversarialScenario
+ from ._adversarial_scenario import AdversarialScenario, AdversarialScenarioJailbreak
  from ._adversarial_simulator import AdversarialSimulator
  from ._constants import SupportedLanguages
  from ._direct_attack_simulator import DirectAttackSimulator
@@ -8,6 +8,7 @@ from ._simulator import Simulator
  __all__ = [
      "AdversarialSimulator",
      "AdversarialScenario",
+     "AdversarialScenarioJailbreak",
      "DirectAttackSimulator",
      "IndirectAttackSimulator",
      "SupportedLanguages",
azure/ai/evaluation/simulator/_adversarial_scenario.py
@@ -16,6 +16,11 @@ class AdversarialScenario(Enum):
      ADVERSARIAL_CONTENT_GEN_UNGROUNDED = "adv_content_gen_ungrounded"
      ADVERSARIAL_CONTENT_GEN_GROUNDED = "adv_content_gen_grounded"
      ADVERSARIAL_CONTENT_PROTECTED_MATERIAL = "adv_content_protected_material"
+
+
+ class AdversarialScenarioJailbreak(Enum):
+     """Adversarial scenario types for XPIA Jailbreak"""
+
      ADVERSARIAL_INDIRECT_JAILBREAK = "adv_xpia"

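
The indirect (XPIA) jailbreak scenario moves out of AdversarialScenario into the new AdversarialScenarioJailbreak enum, which is also re-exported from azure.ai.evaluation.simulator (see the __init__.py hunk above). A minimal check of the new import surface, limited to what this diff shows (simulator constructor arguments are outside these hunks):

    from azure.ai.evaluation.simulator import AdversarialScenario, AdversarialScenarioJailbreak

    print(AdversarialScenario.ADVERSARIAL_CONTENT_PROTECTED_MATERIAL.value)   # "adv_content_protected_material"
    print(AdversarialScenarioJailbreak.ADVERSARIAL_INDIRECT_JAILBREAK.value)  # "adv_xpia"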