azure-ai-evaluation 1.0.0b2__py3-none-any.whl → 1.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of azure-ai-evaluation might be problematic. Click here for more details.
- azure/ai/evaluation/__init__.py +100 -5
- azure/ai/evaluation/{_evaluators/_chat → _aoai}/__init__.py +3 -2
- azure/ai/evaluation/_aoai/aoai_grader.py +140 -0
- azure/ai/evaluation/_aoai/label_grader.py +68 -0
- azure/ai/evaluation/_aoai/python_grader.py +86 -0
- azure/ai/evaluation/_aoai/score_model_grader.py +94 -0
- azure/ai/evaluation/_aoai/string_check_grader.py +66 -0
- azure/ai/evaluation/_aoai/text_similarity_grader.py +80 -0
- azure/ai/evaluation/_azure/__init__.py +3 -0
- azure/ai/evaluation/_azure/_clients.py +204 -0
- azure/ai/evaluation/_azure/_envs.py +207 -0
- azure/ai/evaluation/_azure/_models.py +227 -0
- azure/ai/evaluation/_azure/_token_manager.py +129 -0
- azure/ai/evaluation/_common/__init__.py +9 -1
- azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +24 -9
- azure/ai/evaluation/_common/constants.py +131 -2
- azure/ai/evaluation/_common/evaluation_onedp_client.py +169 -0
- azure/ai/evaluation/_common/math.py +89 -0
- azure/ai/evaluation/_common/onedp/__init__.py +32 -0
- azure/ai/evaluation/_common/onedp/_client.py +166 -0
- azure/ai/evaluation/_common/onedp/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/_model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/_serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_types.py +21 -0
- azure/ai/evaluation/_common/onedp/_utils/__init__.py +6 -0
- azure/ai/evaluation/_common/onedp/_utils/model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_utils/serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_validation.py +66 -0
- azure/ai/evaluation/_common/onedp/_vendor.py +50 -0
- azure/ai/evaluation/_common/onedp/_version.py +9 -0
- azure/ai/evaluation/_common/onedp/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/onedp/aio/_client.py +168 -0
- azure/ai/evaluation/_common/onedp/aio/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/aio/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +7143 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/models/__init__.py +358 -0
- azure/ai/evaluation/_common/onedp/models/_enums.py +447 -0
- azure/ai/evaluation/_common/onedp/models/_models.py +5963 -0
- azure/ai/evaluation/_common/onedp/models/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/operations/_operations.py +8951 -0
- azure/ai/evaluation/_common/onedp/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/py.typed +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/rai_service.py +831 -142
- azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
- azure/ai/evaluation/_common/raiclient/_client.py +128 -0
- azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
- azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
- azure/ai/evaluation/_common/raiclient/_version.py +9 -0
- azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
- azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
- azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
- azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
- azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/operations/_operations.py +1238 -0
- azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/py.typed +1 -0
- azure/ai/evaluation/_common/utils.py +870 -34
- azure/ai/evaluation/_constants.py +167 -6
- azure/ai/evaluation/_converters/__init__.py +3 -0
- azure/ai/evaluation/_converters/_ai_services.py +899 -0
- azure/ai/evaluation/_converters/_models.py +467 -0
- azure/ai/evaluation/_converters/_sk_services.py +495 -0
- azure/ai/evaluation/_eval_mapping.py +83 -0
- azure/ai/evaluation/_evaluate/_batch_run/__init__.py +17 -0
- azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +176 -0
- azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +47 -25
- azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +42 -13
- azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +124 -0
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +62 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +102 -59
- azure/ai/evaluation/_evaluate/_evaluate.py +2134 -311
- azure/ai/evaluation/_evaluate/_evaluate_aoai.py +992 -0
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +14 -99
- azure/ai/evaluation/_evaluate/_utils.py +289 -40
- azure/ai/evaluation/_evaluator_definition.py +76 -0
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +93 -42
- azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +119 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +117 -91
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -39
- azure/ai/evaluation/_evaluators/_common/__init__.py +15 -0
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +742 -0
- azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +63 -0
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +345 -0
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +198 -0
- azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
- azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -4
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -86
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +138 -57
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -55
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +133 -54
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +134 -54
- azure/ai/evaluation/_evaluators/_document_retrieval/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +442 -0
- azure/ai/evaluation/_evaluators/_eci/_eci.py +49 -56
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +102 -60
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +115 -92
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -41
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +90 -37
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +318 -82
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +114 -0
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +104 -0
- azure/ai/evaluation/{_evaluate/_batch_run_client → _evaluators/_intent_resolution}/__init__.py +3 -4
- azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +196 -0
- azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +275 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +107 -61
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +104 -77
- azure/ai/evaluation/_evaluators/_qa/_qa.py +115 -63
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +182 -98
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +178 -49
- azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +202 -0
- azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +84 -0
- azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +148 -0
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +189 -50
- azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +179 -0
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +102 -91
- azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -5
- azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +226 -0
- azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +101 -0
- azure/ai/evaluation/_evaluators/_task_completion/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_completion/_task_completion.py +177 -0
- azure/ai/evaluation/_evaluators/_task_completion/task_completion.prompty +220 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/_task_navigation_efficiency.py +384 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +298 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +166 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/_tool_input_accuracy.py +263 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/tool_input_accuracy.prompty +76 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/_tool_output_utilization.py +225 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/tool_output_utilization.prompty +221 -0
- azure/ai/evaluation/_evaluators/_tool_selection/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_selection/_tool_selection.py +266 -0
- azure/ai/evaluation/_evaluators/_tool_selection/tool_selection.prompty +104 -0
- azure/ai/evaluation/_evaluators/_tool_success/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_success/_tool_success.py +301 -0
- azure/ai/evaluation/_evaluators/_tool_success/tool_success.prompty +321 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +102 -0
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +109 -107
- azure/ai/evaluation/_exceptions.py +51 -7
- azure/ai/evaluation/_http_utils.py +210 -137
- azure/ai/evaluation/_legacy/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_adapters/__init__.py +7 -0
- azure/ai/evaluation/_legacy/_adapters/_check.py +17 -0
- azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
- azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
- azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
- azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
- azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
- azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
- azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
- azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
- azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
- azure/ai/evaluation/_legacy/_batch_engine/_config.py +48 -0
- azure/ai/evaluation/_legacy/_batch_engine/_engine.py +477 -0
- azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
- azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +132 -0
- azure/ai/evaluation/_legacy/_batch_engine/_result.py +107 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run.py +127 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +262 -0
- azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
- azure/ai/evaluation/_legacy/_batch_engine/_trace.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
- azure/ai/evaluation/_legacy/_common/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_common/_async_token_provider.py +117 -0
- azure/ai/evaluation/_legacy/_common/_logging.py +292 -0
- azure/ai/evaluation/_legacy/_common/_thread_pool_executor_with_context.py +17 -0
- azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
- azure/ai/evaluation/_legacy/prompty/_connection.py +119 -0
- azure/ai/evaluation/_legacy/prompty/_exceptions.py +139 -0
- azure/ai/evaluation/_legacy/prompty/_prompty.py +430 -0
- azure/ai/evaluation/_legacy/prompty/_utils.py +663 -0
- azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
- azure/ai/evaluation/_model_configurations.py +130 -8
- azure/ai/evaluation/_safety_evaluation/__init__.py +3 -0
- azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
- azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +917 -0
- azure/ai/evaluation/_user_agent.py +32 -1
- azure/ai/evaluation/_vendor/__init__.py +3 -0
- azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +324 -0
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +59 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +59 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
- azure/ai/evaluation/_version.py +2 -1
- azure/ai/evaluation/red_team/__init__.py +22 -0
- azure/ai/evaluation/red_team/_agent/__init__.py +3 -0
- azure/ai/evaluation/red_team/_agent/_agent_functions.py +261 -0
- azure/ai/evaluation/red_team/_agent/_agent_tools.py +461 -0
- azure/ai/evaluation/red_team/_agent/_agent_utils.py +89 -0
- azure/ai/evaluation/red_team/_agent/_semantic_kernel_plugin.py +228 -0
- azure/ai/evaluation/red_team/_attack_objective_generator.py +268 -0
- azure/ai/evaluation/red_team/_attack_strategy.py +49 -0
- azure/ai/evaluation/red_team/_callback_chat_target.py +115 -0
- azure/ai/evaluation/red_team/_default_converter.py +21 -0
- azure/ai/evaluation/red_team/_evaluation_processor.py +505 -0
- azure/ai/evaluation/red_team/_mlflow_integration.py +430 -0
- azure/ai/evaluation/red_team/_orchestrator_manager.py +803 -0
- azure/ai/evaluation/red_team/_red_team.py +1717 -0
- azure/ai/evaluation/red_team/_red_team_result.py +661 -0
- azure/ai/evaluation/red_team/_result_processor.py +1708 -0
- azure/ai/evaluation/red_team/_utils/__init__.py +37 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +128 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_target.py +601 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +114 -0
- azure/ai/evaluation/red_team/_utils/constants.py +72 -0
- azure/ai/evaluation/red_team/_utils/exception_utils.py +345 -0
- azure/ai/evaluation/red_team/_utils/file_utils.py +266 -0
- azure/ai/evaluation/red_team/_utils/formatting_utils.py +365 -0
- azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
- azure/ai/evaluation/red_team/_utils/metric_mapping.py +73 -0
- azure/ai/evaluation/red_team/_utils/objective_utils.py +46 -0
- azure/ai/evaluation/red_team/_utils/progress_utils.py +252 -0
- azure/ai/evaluation/red_team/_utils/retry_utils.py +218 -0
- azure/ai/evaluation/red_team/_utils/strategy_utils.py +218 -0
- azure/ai/evaluation/simulator/__init__.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +26 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +270 -144
- azure/ai/evaluation/simulator/_constants.py +12 -1
- azure/ai/evaluation/simulator/_conversation/__init__.py +151 -23
- azure/ai/evaluation/simulator/_conversation/_conversation.py +10 -6
- azure/ai/evaluation/simulator/_conversation/constants.py +1 -1
- azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +54 -75
- azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +1 -0
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +26 -5
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +145 -104
- azure/ai/evaluation/simulator/_model_tools/__init__.py +2 -1
- azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +225 -0
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +80 -30
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +117 -45
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +109 -7
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +97 -33
- azure/ai/evaluation/simulator/_model_tools/models.py +30 -27
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +6 -10
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -5
- azure/ai/evaluation/simulator/_simulator.py +302 -208
- azure/ai/evaluation/simulator/_utils.py +31 -13
- azure_ai_evaluation-1.13.3.dist-info/METADATA +939 -0
- azure_ai_evaluation-1.13.3.dist-info/RECORD +305 -0
- {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/WHEEL +1 -1
- azure_ai_evaluation-1.13.3.dist-info/licenses/NOTICE.txt +70 -0
- azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +0 -71
- azure/ai/evaluation/_evaluators/_chat/_chat.py +0 -357
- azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +0 -157
- azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +0 -48
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +0 -65
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -301
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -54
- azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +0 -5
- azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +0 -104
- azure/ai/evaluation/simulator/_tracing.py +0 -89
- azure_ai_evaluation-1.0.0b2.dist-info/METADATA +0 -449
- azure_ai_evaluation-1.0.0b2.dist-info/RECORD +0 -99
- {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/top_level.txt +0 -0
|
@@ -1,6 +1,37 @@
|
|
|
1
1
|
# ---------------------------------------------------------
|
|
2
2
|
# Copyright (c) Microsoft Corporation. All rights reserved.
|
|
3
3
|
# ---------------------------------------------------------
|
|
4
|
+
from contextlib import contextmanager
|
|
5
|
+
from typing import Iterator
|
|
6
|
+
|
|
4
7
|
from azure.ai.evaluation._version import VERSION
|
|
5
8
|
|
|
6
|
-
|
|
9
|
+
|
|
10
|
+
class UserAgentSingleton:
    """Process-wide holder of the HTTP ``User-Agent`` string sent by this package.

    The base value is ``azure-ai-evaluation/<version>``. Additional "products"
    can be appended temporarily via :meth:`add_useragent_product`.
    """

    # Class-level (shared) base user agent; name-mangled to _UserAgentSingleton__BASE_USER_AGENT.
    __BASE_USER_AGENT: str = "{}/{}".format("azure-ai-evaluation", VERSION)

    @property
    def value(self) -> str:
        """Get the user-agent"""
        return self.__BASE_USER_AGENT

    def __str__(self) -> str:
        return self.value

    @classmethod
    @contextmanager
    def add_useragent_product(cls, *product: str) -> Iterator[None]:
        """Appends a "product" (e.g. `name/version`) to the base user agent

        The previous user agent is restored when the context exits, even if the
        body of the ``with`` block raises.

        :param product: User Agent products to append to the base user agent

        ..see-also::

            `User-Agent section of RFC 9110, <https://www.rfc-editor.org/rfc/rfc9110#name-user-agent>`
        """
        old_useragent = cls.__BASE_USER_AGENT
        cls.__BASE_USER_AGENT = f"{old_useragent} {' '.join(product)}"
        try:
            yield
        finally:
            # Bug fix: the original restored after a bare `yield`, so an exception
            # raised inside the context leaked the modified user agent forever.
            cls.__BASE_USER_AGENT = old_useragent
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# coding=utf-8
|
|
2
|
+
# Copyright 2024 The Google Research Authors.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
@@ -0,0 +1,324 @@
|
|
|
1
|
+
# coding=utf-8
|
|
2
|
+
# Copyright 2024 The Google Research Authors.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
# Portions Copyright (c) Microsoft Corporation
|
|
17
|
+
|
|
18
|
+
"""Computes rouge scores between two text blobs.
|
|
19
|
+
|
|
20
|
+
Implementation replicates the functionality in the original ROUGE package. See:
|
|
21
|
+
|
|
22
|
+
Lin, Chin-Yew. ROUGE: a Package for Automatic Evaluation of Summaries. In
|
|
23
|
+
Proceedings of the Workshop on Text Summarization Branches Out (WAS 2004),
|
|
24
|
+
Barcelona, Spain, July 25 - 26, 2004.
|
|
25
|
+
|
|
26
|
+
Default options are equivalent to running:
|
|
27
|
+
ROUGE-1.5.5.pl -e data -n 2 -a settings.xml
|
|
28
|
+
|
|
29
|
+
Or with use_stemmer=True:
|
|
30
|
+
ROUGE-1.5.5.pl -m -e data -n 2 -a settings.xml
|
|
31
|
+
|
|
32
|
+
In these examples settings.xml lists input files and formats.
|
|
33
|
+
"""
|
|
34
|
+
|
|
35
|
+
import collections
|
|
36
|
+
import re
|
|
37
|
+
|
|
38
|
+
from azure.ai.evaluation._vendor.rouge_score import scoring
|
|
39
|
+
from azure.ai.evaluation._vendor.rouge_score import tokenizers
|
|
40
|
+
|
|
41
|
+
import nltk
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class RougeScorer(scoring.BaseScorer):
    """Calculate rouges scores between two blobs of text.

    Sample usage:
      scorer = RougeScorer(['rouge1', 'rougeL'], use_stemmer=True)
      scores = scorer.score('The quick brown fox jumps over the lazy dog',
                            'The quick brown dog jumps on the log.')
    """

    def __init__(self, rouge_types, use_stemmer=False, split_summaries=False, tokenizer=None):
        """Initializes a new RougeScorer.

        Valid rouge types that can be computed are:
          rougen (e.g. rouge1, rouge2): n-gram based scoring.
          rougeL: Longest common subsequence based scoring.

        Args:
          rouge_types: A list of rouge types to calculate.
          use_stemmer: Bool indicating whether Porter stemmer should be used to
            strip word suffixes to improve matching. This arg is used in the
            DefaultTokenizer, but other tokenizers might or might not choose to
            use this.
          split_summaries: whether to add newlines between sentences for rougeLsum
          tokenizer: Tokenizer object which has a tokenize() method.
        Returns:
          A dict mapping rouge types to Score tuples.
        """
        self.rouge_types = rouge_types
        # Fall back to the vendored default tokenizer when none is supplied.
        self._tokenizer = tokenizer if tokenizer else tokenizers.DefaultTokenizer(use_stemmer)
        self._split_summaries = split_summaries

    def score_multi(self, targets, prediction):
        """Calculates rouge scores between targets and prediction.

        The target with the maximum f-measure is used for the final score for
        each score type.

        Args:
          targets: list of texts containing the targets
          prediction: Text containing the predicted text.
        Returns:
          A dict mapping each rouge type to a Score object.
        Raises:
          ValueError: If an invalid rouge type is encountered.
        """
        per_target = [self.score(candidate, prediction) for candidate in targets]
        best = {}
        for rouge_type in self.rouge_types:
            all_fmeasures = [scores[rouge_type].fmeasure for scores in per_target]
            winner = all_fmeasures.index(max(all_fmeasures))
            best[rouge_type] = per_target[winner][rouge_type]
        return best

    def score(self, target, prediction):
        """Calculates rouge scores between the target and prediction.

        Args:
          target: Text containing the target (ground truth) text.
          prediction: Text containing the predicted text.
        Returns:
          A dict mapping each rouge type to a Score object.
        Raises:
          ValueError: If an invalid rouge type is encountered.
        """
        # Tokenize once up front for the token-based rouge types; skip that
        # work entirely when only "rougeLsum" (which tokenizes per sentence)
        # was requested.
        only_lsum = len(self.rouge_types) == 1 and self.rouge_types[0] == "rougeLsum"
        if only_lsum:
            target_tokens = None
            prediction_tokens = None
        else:
            target_tokens = self._tokenizer.tokenize(target)
            prediction_tokens = self._tokenizer.tokenize(prediction)

        result = {}
        for rouge_type in self.rouge_types:
            if rouge_type == "rougeL":
                # Rouge from longest common subsequences.
                scores = _score_lcs(target_tokens, prediction_tokens)
            elif rouge_type == "rougeLsum":
                # Note: Does not support multi-line text.
                target_tokens_list = [self._tokenizer.tokenize(s) for s in self._get_sents(target)]
                prediction_tokens_list = [self._tokenizer.tokenize(s) for s in self._get_sents(prediction)]
                scores = _summary_level_lcs(target_tokens_list, prediction_tokens_list)
            elif re.match(r"rouge[0-9]$", rouge_type):
                # Rouge from n-grams.
                n = int(rouge_type[5:])
                if n <= 0:
                    raise ValueError("rougen requires positive n: %s" % rouge_type)
                scores = _score_ngrams(
                    _create_ngrams(target_tokens, n),
                    _create_ngrams(prediction_tokens, n),
                )
            else:
                raise ValueError("Invalid rouge type: %s" % rouge_type)
            result[rouge_type] = scores

        return result

    def _get_sents(self, text):
        """Split *text* into non-empty sentences for rougeLsum scoring."""
        if self._split_summaries:
            sents = nltk.sent_tokenize(text)
        else:
            # Assume sentences are separated by newline.
            sents = text.split("\n")
        return [s for s in sents if len(s)]
|
+
|
|
159
|
+
|
|
160
|
+
def _create_ngrams(tokens, n):
|
|
161
|
+
"""Creates ngrams from the given list of tokens.
|
|
162
|
+
|
|
163
|
+
Args:
|
|
164
|
+
tokens: A list of tokens from which ngrams are created.
|
|
165
|
+
n: Number of tokens to use, e.g. 2 for bigrams.
|
|
166
|
+
Returns:
|
|
167
|
+
A dictionary mapping each bigram to the number of occurrences.
|
|
168
|
+
"""
|
|
169
|
+
|
|
170
|
+
ngrams = collections.Counter()
|
|
171
|
+
for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)):
|
|
172
|
+
ngrams[ngram] += 1
|
|
173
|
+
return ngrams
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def _score_lcs(target_tokens, prediction_tokens):
    """Computes LCS (Longest Common Subsequence) rouge scores.

    Args:
      target_tokens: Tokens from the target text.
      prediction_tokens: Tokens from the predicted text.
    Returns:
      A Score object containing computed scores.
    """
    # With either side empty there can be no overlap at all.
    if not target_tokens or not prediction_tokens:
        return scoring.Score(precision=0, recall=0, fmeasure=0)

    # LCS length is the bottom-right cell of the DP table.
    table = _lcs_table(target_tokens, prediction_tokens)
    lcs_length = table[-1][-1]

    precision = lcs_length / len(prediction_tokens)
    recall = lcs_length / len(target_tokens)
    return scoring.Score(
        precision=precision,
        recall=recall,
        fmeasure=scoring.fmeasure(precision, recall),
    )
+
|
|
199
|
+
|
|
200
|
+
def _lcs_table(ref, can):
|
|
201
|
+
"""Create 2-d LCS score table."""
|
|
202
|
+
rows = len(ref)
|
|
203
|
+
cols = len(can)
|
|
204
|
+
lcs_table = [[0] * (cols + 1) for _ in range(rows + 1)]
|
|
205
|
+
for i in range(1, rows + 1):
|
|
206
|
+
for j in range(1, cols + 1):
|
|
207
|
+
if ref[i - 1] == can[j - 1]:
|
|
208
|
+
lcs_table[i][j] = lcs_table[i - 1][j - 1] + 1
|
|
209
|
+
else:
|
|
210
|
+
lcs_table[i][j] = max(lcs_table[i - 1][j], lcs_table[i][j - 1])
|
|
211
|
+
return lcs_table
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def _backtrack_norec(t, ref, can):
|
|
215
|
+
"""Read out LCS."""
|
|
216
|
+
i = len(ref)
|
|
217
|
+
j = len(can)
|
|
218
|
+
lcs = []
|
|
219
|
+
while i > 0 and j > 0:
|
|
220
|
+
if ref[i - 1] == can[j - 1]:
|
|
221
|
+
lcs.insert(0, i - 1)
|
|
222
|
+
i -= 1
|
|
223
|
+
j -= 1
|
|
224
|
+
elif t[i][j - 1] > t[i - 1][j]:
|
|
225
|
+
j -= 1
|
|
226
|
+
else:
|
|
227
|
+
i -= 1
|
|
228
|
+
return lcs
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def _summary_level_lcs(ref_sent, can_sent):
    """ROUGE: Summary-level LCS, section 3.2 in the ROUGE paper.

    Args:
        ref_sent: List of tokenized reference sentences.
        can_sent: List of tokenized candidate sentences.

    Returns:
        Summary-level ROUGE Score.
    """
    if not ref_sent or not can_sent:
        return scoring.Score(precision=0, recall=0, fmeasure=0)

    ref_total = sum(len(sent) for sent in ref_sent)
    can_total = sum(len(sent) for sent in can_sent)
    if not can_total or not ref_total:
        return scoring.Score(precision=0, recall=0, fmeasure=0)

    # Per-token budgets used to prevent double counting across sentences.
    ref_budget = collections.Counter()
    for sent in ref_sent:
        ref_budget.update(sent)
    can_budget = collections.Counter()
    for sent in can_sent:
        can_budget.update(sent)

    hits = 0
    for sent in ref_sent:
        # The paper describes simply hits += len(_union_lcs(...)), but the
        # reference implementation (ROUGE 1.5.5) additionally caps each token
        # by its remaining budget to avoid double counting; we do the same.
        for tok in _union_lcs(sent, can_sent):
            if can_budget[tok] > 0 and ref_budget[tok] > 0:
                hits += 1
                can_budget[tok] -= 1
                ref_budget[tok] -= 1

    precision = hits / can_total
    recall = hits / ref_total
    return scoring.Score(precision=precision, recall=recall, fmeasure=scoring.fmeasure(precision, recall))
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
def _union_lcs(ref, c_list):
    """Find the union LCS between a ref sentence and candidate sentences.

    Args:
        ref: List of tokens (one reference sentence).
        c_list: List of tokenized candidate sentences.

    Returns:
        List of tokens in ref that participate in the union LCS.
    """
    per_candidate = []
    for candidate in c_list:
        per_candidate.append(lcs_ind(ref, candidate))
    union_indices = _find_union(per_candidate)
    return [ref[i] for i in union_indices]
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def _find_union(lcs_list):
|
|
292
|
+
"""Finds union LCS given a list of LCS."""
|
|
293
|
+
return sorted(list(set().union(*lcs_list)))
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
def lcs_ind(ref, can):
    """Return the indices into ref of one longest common subsequence."""
    return _backtrack_norec(_lcs_table(ref, can), ref, can)
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
def _score_ngrams(target_ngrams, prediction_ngrams):
    """Compute n-gram based ROUGE scores.

    Args:
        target_ngrams: A Counter object mapping each ngram to the number of
            occurrences in the target text.
        prediction_ngrams: A Counter object mapping each ngram to the number
            of occurrences in the prediction text.

    Returns:
        A Score object containing computed scores.
    """
    # Clipped overlap: each target ngram counts at most as often as it
    # appears in the prediction (missing keys yield 0 on a Counter).
    overlap = sum(
        min(count, prediction_ngrams[ngram]) for ngram, count in target_ngrams.items()
    )
    target_total = sum(target_ngrams.values())
    prediction_total = sum(prediction_ngrams.values())

    # max(..., 1) guards against division by zero for empty inputs.
    precision = overlap / max(prediction_total, 1)
    recall = overlap / max(target_total, 1)
    return scoring.Score(precision=precision, recall=recall, fmeasure=scoring.fmeasure(precision, recall))
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# coding=utf-8
|
|
2
|
+
# Copyright 2024 The Google Research Authors.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
# Portions Copyright (c) Microsoft Corporation
|
|
17
|
+
|
|
18
|
+
"""Library for scoring and evaluation of text samples.
|
|
19
|
+
|
|
20
|
+
Aggregation functions use bootstrap resampling to compute confidence intervals
|
|
21
|
+
as per the original ROUGE perl implementation.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import abc
|
|
25
|
+
import collections
|
|
26
|
+
from typing import Dict
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Score(collections.namedtuple("Score", ["precision", "recall", "fmeasure"])):
    """Immutable tuple of precision, recall, and f-measure values."""
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class BaseScorer(abc.ABC):
    """Abstract base class for Scorer objects.

    The py2-era ``object, metaclass=abc.ABCMeta`` spelling was replaced with
    ``abc.ABC``, matching the abstract base style used elsewhere in this
    vendored package; the class interface is unchanged.
    """

    @abc.abstractmethod
    def score(self, target, prediction):
        """Calculates score between the target and prediction.

        Args:
            target: Text containing the target (ground truth) text.
            prediction: Text containing the predicted text.

        Returns:
            A dict mapping each score_type (string) to a Score object.
        """
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class AggregateScore(collections.namedtuple("AggregateScore", ["low", "mid", "high"])):
    """Tuple containing confidence intervals for scores.

    Per the module docstring, intervals come from bootstrap resampling:
    low/high bound the interval, mid is the point estimate.
    """
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def fmeasure(precision, recall):
    """Compute the f-measure (harmonic mean) of precision and recall.

    Returns 0.0 when both inputs are zero to avoid division by zero.
    """
    denom = precision + recall
    if denom > 0:
        return 2 * precision * recall / denom
    return 0.0
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# coding=utf-8
|
|
2
|
+
# Copyright 2024 The Google Research Authors.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
# Portions Copyright (c) Microsoft Corporation
|
|
17
|
+
|
|
18
|
+
"""A library for tokenizing text."""
|
|
19
|
+
|
|
20
|
+
import re
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# Pre-compile regexes that are used often.
# Runs of characters outside [a-z0-9]; replaced with spaces before splitting.
NON_ALPHANUM_PATTERN = r"[^a-z0-9]+"
NON_ALPHANUM_RE = re.compile(NON_ALPHANUM_PATTERN)
# Whitespace runs used as the token separator.
SPACES_PATTERN = r"\s+"
SPACES_RE = re.compile(SPACES_PATTERN)
# A token is valid only if it is entirely lowercase alphanumeric.
VALID_TOKEN_PATTERN = r"^[a-z0-9]+$"
VALID_TOKEN_RE = re.compile(VALID_TOKEN_PATTERN)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def tokenize(text, stemmer):
    """Tokenize input text into a list of lowercase alphanumeric tokens.

    This approach aims to replicate the approach taken by Chin-Yew Lin in
    the original ROUGE implementation.

    Args:
        text: A text blob to tokenize.
        stemmer: An optional stemmer (any object exposing .stem()); pass a
            falsy value to skip stemming.

    Returns:
        A list of string tokens extracted from input text.
    """
    # Lowercase, then collapse every non-alphanumeric run into a space.
    normalized = NON_ALPHANUM_RE.sub(" ", text.lower())
    pieces = SPACES_RE.split(normalized)

    if stemmer:
        # Only stem words longer than 3 characters.
        pieces = [stemmer.stem(piece) if len(piece) > 3 else piece for piece in pieces]

    # Final pass: drop empty or otherwise invalid tokens.
    return [piece for piece in pieces if VALID_TOKEN_RE.match(piece)]
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
# coding=utf-8
|
|
2
|
+
# Copyright 2024 The Google Research Authors.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
"""Library containing Tokenizer definitions.
|
|
17
|
+
|
|
18
|
+
The RougeScorer class can be instantiated with the tokenizers defined here. New
|
|
19
|
+
tokenizers can be defined by creating a subclass of the Tokenizer abstract class
|
|
20
|
+
and overriding the tokenize() method.
|
|
21
|
+
"""
|
|
22
|
+
import abc
|
|
23
|
+
|
|
24
|
+
from nltk.stem import porter
|
|
25
|
+
|
|
26
|
+
from azure.ai.evaluation._vendor.rouge_score import tokenize
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Tokenizer(abc.ABC):
    """Abstract base class for a tokenizer.

    Subclasses of Tokenizer must implement the tokenize() method.
    """

    @abc.abstractmethod
    def tokenize(self, text):
        # Subclasses return a list of string tokens extracted from text.
        raise NotImplementedError("Tokenizer must override tokenize() method")
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class DefaultTokenizer(Tokenizer):
    """Default tokenizer, delegating to the vendored rouge_score tokenize().

    NOTE(review): the historical description said "tokenizes on whitespace",
    but the delegated tokenizer lowercases and splits on runs of
    non-alphanumeric characters (see the tokenize module's regexes).
    """

    def __init__(self, use_stemmer=False):
        """Constructor for DefaultTokenizer.

        Args:
            use_stemmer: boolean, indicating whether Porter stemmer should be
                used to strip word suffixes to improve matching.
        """
        # The Porter stemmer is only constructed when stemming was requested.
        self._stemmer = porter.PorterStemmer() if use_stemmer else None

    def tokenize(self, text):
        # Delegate to the module-level tokenize(), passing the optional stemmer.
        return tokenize.tokenize(text, self._stemmer)
|
azure/ai/evaluation/_version.py
CHANGED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# ---------------------------------------------------------
|
|
2
|
+
# Copyright (c) Microsoft Corporation. All rights reserved.
|
|
3
|
+
# ---------------------------------------------------------
|
|
4
|
+
|
|
5
|
+
# Red-team public API: re-export from the private submodules. These imports
# require the optional "redteam" extra (Pyrit); fail with an actionable hint.
try:
    from ._red_team import RedTeam
    from ._attack_strategy import AttackStrategy
    from ._attack_objective_generator import RiskCategory, SupportedLanguages
    from ._red_team_result import RedTeamResult
except ImportError as exc:
    # Chain the original error so the actual missing module is visible in the
    # traceback, not just the generic installation hint.
    raise ImportError(
        "Could not import Pyrit. Please install the dependency with `pip install azure-ai-evaluation[redteam]`."
    ) from exc


__all__ = [
    "RedTeam",
    "AttackStrategy",
    "RiskCategory",
    "RedTeamResult",
    "SupportedLanguages",
]
|