azure-ai-evaluation 1.0.0b2__py3-none-any.whl → 1.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of azure-ai-evaluation might be problematic.
- azure/ai/evaluation/__init__.py +100 -5
- azure/ai/evaluation/{_evaluators/_chat → _aoai}/__init__.py +3 -2
- azure/ai/evaluation/_aoai/aoai_grader.py +140 -0
- azure/ai/evaluation/_aoai/label_grader.py +68 -0
- azure/ai/evaluation/_aoai/python_grader.py +86 -0
- azure/ai/evaluation/_aoai/score_model_grader.py +94 -0
- azure/ai/evaluation/_aoai/string_check_grader.py +66 -0
- azure/ai/evaluation/_aoai/text_similarity_grader.py +80 -0
- azure/ai/evaluation/_azure/__init__.py +3 -0
- azure/ai/evaluation/_azure/_clients.py +204 -0
- azure/ai/evaluation/_azure/_envs.py +207 -0
- azure/ai/evaluation/_azure/_models.py +227 -0
- azure/ai/evaluation/_azure/_token_manager.py +129 -0
- azure/ai/evaluation/_common/__init__.py +9 -1
- azure/ai/evaluation/{simulator/_helpers → _common}/_experimental.py +24 -9
- azure/ai/evaluation/_common/constants.py +131 -2
- azure/ai/evaluation/_common/evaluation_onedp_client.py +169 -0
- azure/ai/evaluation/_common/math.py +89 -0
- azure/ai/evaluation/_common/onedp/__init__.py +32 -0
- azure/ai/evaluation/_common/onedp/_client.py +166 -0
- azure/ai/evaluation/_common/onedp/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/_model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/_serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_types.py +21 -0
- azure/ai/evaluation/_common/onedp/_utils/__init__.py +6 -0
- azure/ai/evaluation/_common/onedp/_utils/model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_utils/serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_validation.py +66 -0
- azure/ai/evaluation/_common/onedp/_vendor.py +50 -0
- azure/ai/evaluation/_common/onedp/_version.py +9 -0
- azure/ai/evaluation/_common/onedp/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/onedp/aio/_client.py +168 -0
- azure/ai/evaluation/_common/onedp/aio/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/aio/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +7143 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/models/__init__.py +358 -0
- azure/ai/evaluation/_common/onedp/models/_enums.py +447 -0
- azure/ai/evaluation/_common/onedp/models/_models.py +5963 -0
- azure/ai/evaluation/_common/onedp/models/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/operations/_operations.py +8951 -0
- azure/ai/evaluation/_common/onedp/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/py.typed +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/rai_service.py +831 -142
- azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
- azure/ai/evaluation/_common/raiclient/_client.py +128 -0
- azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
- azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
- azure/ai/evaluation/_common/raiclient/_version.py +9 -0
- azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
- azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
- azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
- azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
- azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/operations/_operations.py +1238 -0
- azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/py.typed +1 -0
- azure/ai/evaluation/_common/utils.py +870 -34
- azure/ai/evaluation/_constants.py +167 -6
- azure/ai/evaluation/_converters/__init__.py +3 -0
- azure/ai/evaluation/_converters/_ai_services.py +899 -0
- azure/ai/evaluation/_converters/_models.py +467 -0
- azure/ai/evaluation/_converters/_sk_services.py +495 -0
- azure/ai/evaluation/_eval_mapping.py +83 -0
- azure/ai/evaluation/_evaluate/_batch_run/__init__.py +17 -0
- azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +176 -0
- azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
- azure/ai/evaluation/_evaluate/{_batch_run_client → _batch_run}/code_client.py +47 -25
- azure/ai/evaluation/_evaluate/{_batch_run_client/batch_run_context.py → _batch_run/eval_run_context.py} +42 -13
- azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +124 -0
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +62 -0
- azure/ai/evaluation/_evaluate/_eval_run.py +102 -59
- azure/ai/evaluation/_evaluate/_evaluate.py +2134 -311
- azure/ai/evaluation/_evaluate/_evaluate_aoai.py +992 -0
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +14 -99
- azure/ai/evaluation/_evaluate/_utils.py +289 -40
- azure/ai/evaluation/_evaluator_definition.py +76 -0
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +93 -42
- azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +119 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +117 -91
- azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +76 -39
- azure/ai/evaluation/_evaluators/_common/__init__.py +15 -0
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +742 -0
- azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +63 -0
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +345 -0
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +198 -0
- azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
- azure/ai/evaluation/_evaluators/_content_safety/__init__.py +0 -4
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +144 -86
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +138 -57
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +123 -55
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +133 -54
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +134 -54
- azure/ai/evaluation/_evaluators/_document_retrieval/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +442 -0
- azure/ai/evaluation/_evaluators/_eci/_eci.py +49 -56
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +102 -60
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +115 -92
- azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +66 -41
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +90 -37
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +318 -82
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +114 -0
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +104 -0
- azure/ai/evaluation/{_evaluate/_batch_run_client → _evaluators/_intent_resolution}/__init__.py +3 -4
- azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +196 -0
- azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +275 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +107 -61
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +104 -77
- azure/ai/evaluation/_evaluators/_qa/_qa.py +115 -63
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +182 -98
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +178 -49
- azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +202 -0
- azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +84 -0
- azure/ai/evaluation/_evaluators/{_chat/retrieval → _retrieval}/__init__.py +2 -2
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +148 -0
- azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +93 -0
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +189 -50
- azure/ai/evaluation/_evaluators/_service_groundedness/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +179 -0
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +102 -91
- azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +0 -5
- azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +226 -0
- azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +101 -0
- azure/ai/evaluation/_evaluators/_task_completion/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_completion/_task_completion.py +177 -0
- azure/ai/evaluation/_evaluators/_task_completion/task_completion.prompty +220 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/_task_navigation_efficiency.py +384 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +298 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +166 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/_tool_input_accuracy.py +263 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/tool_input_accuracy.prompty +76 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/_tool_output_utilization.py +225 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/tool_output_utilization.prompty +221 -0
- azure/ai/evaluation/_evaluators/_tool_selection/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_selection/_tool_selection.py +266 -0
- azure/ai/evaluation/_evaluators/_tool_selection/tool_selection.prompty +104 -0
- azure/ai/evaluation/_evaluators/_tool_success/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_success/_tool_success.py +301 -0
- azure/ai/evaluation/_evaluators/_tool_success/tool_success.prompty +321 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +102 -0
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +109 -107
- azure/ai/evaluation/_exceptions.py +51 -7
- azure/ai/evaluation/_http_utils.py +210 -137
- azure/ai/evaluation/_legacy/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_adapters/__init__.py +7 -0
- azure/ai/evaluation/_legacy/_adapters/_check.py +17 -0
- azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
- azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
- azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
- azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
- azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
- azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
- azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
- azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
- azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
- azure/ai/evaluation/_legacy/_batch_engine/_config.py +48 -0
- azure/ai/evaluation/_legacy/_batch_engine/_engine.py +477 -0
- azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
- azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +132 -0
- azure/ai/evaluation/_legacy/_batch_engine/_result.py +107 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run.py +127 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +262 -0
- azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
- azure/ai/evaluation/_legacy/_batch_engine/_trace.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
- azure/ai/evaluation/_legacy/_common/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_common/_async_token_provider.py +117 -0
- azure/ai/evaluation/_legacy/_common/_logging.py +292 -0
- azure/ai/evaluation/_legacy/_common/_thread_pool_executor_with_context.py +17 -0
- azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
- azure/ai/evaluation/_legacy/prompty/_connection.py +119 -0
- azure/ai/evaluation/_legacy/prompty/_exceptions.py +139 -0
- azure/ai/evaluation/_legacy/prompty/_prompty.py +430 -0
- azure/ai/evaluation/_legacy/prompty/_utils.py +663 -0
- azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
- azure/ai/evaluation/_model_configurations.py +130 -8
- azure/ai/evaluation/_safety_evaluation/__init__.py +3 -0
- azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
- azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +917 -0
- azure/ai/evaluation/_user_agent.py +32 -1
- azure/ai/evaluation/_vendor/__init__.py +3 -0
- azure/ai/evaluation/_vendor/rouge_score/__init__.py +14 -0
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +324 -0
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +59 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +59 -0
- azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +53 -0
- azure/ai/evaluation/_version.py +2 -1
- azure/ai/evaluation/red_team/__init__.py +22 -0
- azure/ai/evaluation/red_team/_agent/__init__.py +3 -0
- azure/ai/evaluation/red_team/_agent/_agent_functions.py +261 -0
- azure/ai/evaluation/red_team/_agent/_agent_tools.py +461 -0
- azure/ai/evaluation/red_team/_agent/_agent_utils.py +89 -0
- azure/ai/evaluation/red_team/_agent/_semantic_kernel_plugin.py +228 -0
- azure/ai/evaluation/red_team/_attack_objective_generator.py +268 -0
- azure/ai/evaluation/red_team/_attack_strategy.py +49 -0
- azure/ai/evaluation/red_team/_callback_chat_target.py +115 -0
- azure/ai/evaluation/red_team/_default_converter.py +21 -0
- azure/ai/evaluation/red_team/_evaluation_processor.py +505 -0
- azure/ai/evaluation/red_team/_mlflow_integration.py +430 -0
- azure/ai/evaluation/red_team/_orchestrator_manager.py +803 -0
- azure/ai/evaluation/red_team/_red_team.py +1717 -0
- azure/ai/evaluation/red_team/_red_team_result.py +661 -0
- azure/ai/evaluation/red_team/_result_processor.py +1708 -0
- azure/ai/evaluation/red_team/_utils/__init__.py +37 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +128 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_target.py +601 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +114 -0
- azure/ai/evaluation/red_team/_utils/constants.py +72 -0
- azure/ai/evaluation/red_team/_utils/exception_utils.py +345 -0
- azure/ai/evaluation/red_team/_utils/file_utils.py +266 -0
- azure/ai/evaluation/red_team/_utils/formatting_utils.py +365 -0
- azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
- azure/ai/evaluation/red_team/_utils/metric_mapping.py +73 -0
- azure/ai/evaluation/red_team/_utils/objective_utils.py +46 -0
- azure/ai/evaluation/red_team/_utils/progress_utils.py +252 -0
- azure/ai/evaluation/red_team/_utils/retry_utils.py +218 -0
- azure/ai/evaluation/red_team/_utils/strategy_utils.py +218 -0
- azure/ai/evaluation/simulator/__init__.py +2 -1
- azure/ai/evaluation/simulator/_adversarial_scenario.py +26 -1
- azure/ai/evaluation/simulator/_adversarial_simulator.py +270 -144
- azure/ai/evaluation/simulator/_constants.py +12 -1
- azure/ai/evaluation/simulator/_conversation/__init__.py +151 -23
- azure/ai/evaluation/simulator/_conversation/_conversation.py +10 -6
- azure/ai/evaluation/simulator/_conversation/constants.py +1 -1
- azure/ai/evaluation/simulator/_data_sources/__init__.py +3 -0
- azure/ai/evaluation/simulator/_data_sources/grounding.json +1150 -0
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +54 -75
- azure/ai/evaluation/simulator/_helpers/__init__.py +1 -2
- azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +1 -0
- azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +26 -5
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +145 -104
- azure/ai/evaluation/simulator/_model_tools/__init__.py +2 -1
- azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +225 -0
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +80 -30
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +117 -45
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +109 -7
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +97 -33
- azure/ai/evaluation/simulator/_model_tools/models.py +30 -27
- azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +6 -10
- azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +6 -5
- azure/ai/evaluation/simulator/_simulator.py +302 -208
- azure/ai/evaluation/simulator/_utils.py +31 -13
- azure_ai_evaluation-1.13.3.dist-info/METADATA +939 -0
- azure_ai_evaluation-1.13.3.dist-info/RECORD +305 -0
- {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/WHEEL +1 -1
- azure_ai_evaluation-1.13.3.dist-info/licenses/NOTICE.txt +70 -0
- azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +0 -71
- azure/ai/evaluation/_evaluators/_chat/_chat.py +0 -357
- azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +0 -157
- azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +0 -48
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +0 -65
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +0 -301
- azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +0 -54
- azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +0 -5
- azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +0 -104
- azure/ai/evaluation/simulator/_tracing.py +0 -89
- azure_ai_evaluation-1.0.0b2.dist-info/METADATA +0 -449
- azure_ai_evaluation-1.0.0b2.dist-info/RECORD +0 -99
- {azure_ai_evaluation-1.0.0b2.dist-info → azure_ai_evaluation-1.13.3.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluators/_task_navigation_efficiency/_task_navigation_efficiency.py
@@ -0,0 +1,384 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from enum import Enum
+from collections import Counter
+import json
+from typing import Dict, List, Union, Any, Tuple
+from typing_extensions import overload, override
+
+from azure.ai.evaluation._constants import EVALUATION_PASS_FAIL_MAPPING
+from azure.ai.evaluation._evaluators._common import EvaluatorBase
+from azure.ai.evaluation._exceptions import (
+    ErrorCategory,
+    ErrorTarget,
+    EvaluationException,
+)
+
+
+class _TaskNavigationEfficiencyMatchingMode(str, Enum):
+    """
+    Enumeration of task navigation efficiency matching modes.
+
+    This enum allows you to specify which single matching technique should be used when evaluating
+    the efficiency of an agent's tool call sequence against a ground truth path.
+    """
+
+    EXACT_MATCH = "exact_match"
+    """
+    Binary metric indicating whether the agent's tool calls exactly match the ground truth.
+
+    Returns True only if the agent's tool call sequence is identical to the expected sequence
+    in both order and content (no extra steps, no missing steps, correct order).
+    """
+
+    IN_ORDER_MATCH = "in_order_match"
+    """
+    Binary metric allowing extra steps but requiring correct order of required tool calls.
+
+    Returns True if all ground truth steps appear in the agent's sequence in the correct
+    order, even if there are additional steps interspersed.
+    """
+
+    ANY_ORDER_MATCH = "any_order_match"
+    """
+    Binary metric allowing both extra steps and different ordering.
+
+    Returns True if all ground truth steps appear in the agent's sequence with sufficient
+    frequency, regardless of order. Most lenient matching criterion.
+    """
+
+
+class _TaskNavigationEfficiencyEvaluator(EvaluatorBase):
+    """
+    Evaluates whether an agent's sequence of actions is efficient and follows optimal decision-making patterns.
+
+    The Task Navigation Efficiency Evaluator returns binary matching results between the agent's tool usage trajectory and the ground truth expected steps.
+    It has three matching techniques: exact match, in-order match (allows extra steps), and any-order match (allows extra steps and ignores order).
+    It also returns precision, recall, and F1 scores in the properties bag.
+
+    :param matching_mode: The matching mode to use. Default is "exact_match".
+    :type matching_mode: Union[str, _TaskNavigationEfficiencyMatchingMode]
+
+    .. admonition:: Example:
+
+        .. code-block:: python
+
+            from azure.ai.evaluation._evaluators._task_navigation_efficiency import _TaskNavigationEfficiencyEvaluator
+
+            task_navigation_efficiency_eval = _TaskNavigationEfficiencyEvaluator(
+                matching_mode="exact_match"
+            )
+
+            # Example 1: Using simple tool names list
+            result = task_navigation_efficiency_eval(
+                response=[
+                    {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_1", "name": "identify_tools_to_call", "arguments": {}}]},
+                    {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_2", "name": "call_tool_A", "arguments": {}}]},
+                    {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_3", "name": "call_tool_B", "arguments": {}}]},
+                    {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_4", "name": "response_synthesis", "arguments": {}}]}
+                ],
+                ground_truth=["identify_tools_to_call", "call_tool_A", "call_tool_B", "response_synthesis"]
+            )
+
+            # Example 2: Using tool names with parameters (exact parameter matching required)
+            result = task_navigation_efficiency_eval(
+                response=[
+                    {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_1", "name": "search", "arguments": {"query": "weather", "location": "NYC"}}]},
+                    {"role": "assistant", "content": [{"type": "tool_call", "tool_call_id": "call_2", "name": "format_result", "arguments": {"format": "json"}}]}
+                ],
+                ground_truth=(
+                    ["search", "format_result"],
+                    {
+                        "search": {"query": "weather", "location": "NYC"},
+                        "format_result": {"format": "json"}
+                    }
+                )
+            )
+    """
+
+    id = "azureai://built-in/evaluators/task_navigation_efficiency"
+    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
+
+    matching_mode: _TaskNavigationEfficiencyMatchingMode
+    """The matching mode to use."""
+
+    @override
+    def __init__(
+        self,
+        *,
+        matching_mode: Union[
+            str, _TaskNavigationEfficiencyMatchingMode
+        ] = _TaskNavigationEfficiencyMatchingMode.EXACT_MATCH,
+    ):
+        # Type checking for the matching_mode parameter
+        if isinstance(matching_mode, str):
+            try:
+                self.matching_mode = _TaskNavigationEfficiencyMatchingMode(matching_mode)
+            except ValueError:
+                raise ValueError(
+                    f"matching_mode must be one of {[m.value for m in _TaskNavigationEfficiencyMatchingMode]}, got '{matching_mode}'"
+                )
+        elif isinstance(matching_mode, _TaskNavigationEfficiencyMatchingMode):
+            self.matching_mode = matching_mode
+        else:
+            raise EvaluationException(
+                f"matching_mode must be a string with one of {[m.value for m in _TaskNavigationEfficiencyMatchingMode]} or _TaskNavigationEfficiencyMatchingMode enum, got {type(matching_mode)}",
+                internal_message=str(matching_mode),
+                target=ErrorTarget.TASK_NAVIGATION_EFFICIENCY_EVALUATOR,
+                category=ErrorCategory.INVALID_VALUE,
+            )
+
+        super().__init__()
+
+    def _prepare_steps_for_comparison(
+        self,
+        agent_tool_pairs: List[Tuple[str, Dict[str, Any]]],
+        ground_truth: List[str],
+        ground_truth_params: Dict[str, Dict[str, Any]],
+        use_parameter_matching: bool,
+    ) -> Tuple[
+        List[Union[str, Tuple[str, Tuple]]],
+        List[Union[str, Tuple[str, Tuple]]],
+    ]:
+        """Prepare agent and ground truth steps for comparison based on parameter matching mode."""
+        agent_steps: List[Union[str, Tuple[str, Tuple]]] = []
+        ground_truth_steps: List[Union[str, Tuple[str, Tuple]]] = []
+        if use_parameter_matching:
+            # When parameter matching is enabled, we need to match both tool name and parameters
+            agent_steps = [(pair[0], tuple(sorted(pair[1].items()))) for pair in agent_tool_pairs]
+            ground_truth_steps = [
+                (name, tuple(sorted(ground_truth_params.get(name, {}).items()))) for name in ground_truth
+            ]
+        else:
+            # When parameter matching is disabled, only compare tool names
+            agent_steps = [name for name, _ in agent_tool_pairs]
+            ground_truth_steps = [step for step in ground_truth]
+
+        return agent_steps, ground_truth_steps
+
+    def _calculate_precision_recall_f1_scores(self, agent_steps: List, ground_truth_steps: List) -> Dict[str, float]:
+        """Calculate precision, recall, and F1 scores."""
+        if not agent_steps:
+            return {"precision_score": 0.0, "recall_score": 0.0, "f1_score": 0.0}
+
+        # Count occurrences of each step in both lists to handle duplicates
+        agent_steps_counts = Counter(agent_steps)
+        ground_truth_counts = Counter(ground_truth_steps)
+
+        # Calculate true positives by taking the minimum count for each common element
+        # For each step, count the intersection (min count) of agent and ground truth steps
+        true_positives = sum(
+            min(agent_steps_counts[step], ground_truth_counts[step])
+            for step in agent_steps_counts
+            if step in ground_truth_counts
+        )
+
+        # Calculate false positives (agent steps not in ground truth or excess occurrences)
+        # For each step, count the agent occurrences in excess of the matched (min) count;
+        # the excess is zero when the agent count does not exceed the ground truth count
+        false_positives = sum(
+            agent_steps_counts[step] - min(agent_steps_counts[step], ground_truth_counts.get(step, 0))
+            for step in agent_steps_counts
+        )
+
+        # Calculate false negatives (ground truth steps not in agent or missing occurrences)
+        # For each step, count the ground truth occurrences in excess of the matched (min) count;
+        # the excess is zero when the ground truth count does not exceed the agent count
+        false_negatives = sum(
+            ground_truth_counts[step] - min(ground_truth_counts[step], agent_steps_counts.get(step, 0))
+            for step in ground_truth_counts
+        )
+
+        # Calculate precision, recall, F1
+        precision = (
+            true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0.0
+        )
+        recall = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0.0
+        f1_score = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
+
+        return {
+            "precision_score": precision,
+            "recall_score": recall,
+            "f1_score": f1_score,
+        }
+
+    def _calculate_exact_match(self, agent_steps: List, ground_truth_steps: List) -> bool:
+        """Check if agent steps exactly match ground truth (order and content)."""
+        return agent_steps == ground_truth_steps
+
+    def _calculate_in_order_match(self, agent_steps: List, ground_truth_steps: List) -> bool:
+        """Check if all ground truth steps appear in agent steps in correct order (extra steps allowed)."""
+        if not ground_truth_steps:
+            return True
+
+        gt_index = 0
+        for step in agent_steps:
+            if gt_index < len(ground_truth_steps) and step == ground_truth_steps[gt_index]:
+                gt_index += 1
+
+        return gt_index == len(ground_truth_steps)
+
+    def _calculate_any_order_match(self, agent_steps: List, ground_truth_steps: List) -> bool:
+        """Check if all ground truth steps appear in agent steps with sufficient frequency (any order, extra steps allowed)."""
+        # Count occurrences of each step in both lists to handle duplicates
+        agent_counts = Counter(agent_steps)
+        ground_truth_counts = Counter(ground_truth_steps)
+
+        # Check if agent has at least as many occurrences of each ground truth step
+        return all(agent_counts[step] >= ground_truth_counts[step] for step in ground_truth_counts)
+
+    _TASK_NAVIGATION_EFFICIENCY_MATCHING_MODE_TO_FUNCTIONS = {
+        _TaskNavigationEfficiencyMatchingMode.EXACT_MATCH: _calculate_exact_match,
+        _TaskNavigationEfficiencyMatchingMode.IN_ORDER_MATCH: _calculate_in_order_match,
+        _TaskNavigationEfficiencyMatchingMode.ANY_ORDER_MATCH: _calculate_any_order_match,
+    }
+
+    @override
+    async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str, Dict[str, float]]]:
+        """Produce a task navigation efficiency evaluation result.
+
+        :param eval_input: The input to the evaluation function. Must contain "response" and "ground_truth".
+        :type eval_input: Dict
+        :return: The evaluation result.
+        :rtype: Dict[str, Union[float, str, Dict[str, float]]]
+        """
+        response = eval_input["response"]
+        ground_truth = eval_input["ground_truth"]
+
+        # Value and type checking for ground truth steps
+        if not ground_truth:
+            raise ValueError("ground_truth cannot be empty")
+
+        # Check if ground_truth is a tuple (tool names + parameters) or list (tool names only)
+        use_parameter_matching = False
+        ground_truth_names = []
+        ground_truth_params_dict: Dict[str, Dict[str, Any]] = {}
+
+        if isinstance(ground_truth, tuple) and len(ground_truth) == 2:
+            # Tuple format: (tool_names, parameters_dict)
+            tool_names_list, params_dict = ground_truth
+
+            if not isinstance(tool_names_list, list) or not all(isinstance(name, str) for name in tool_names_list):
+                raise TypeError("ground_truth tuple first element must be a list of strings (tool names)")
+
+            if not isinstance(params_dict, dict):
+                raise TypeError(
+                    "ground_truth tuple second element must be a dictionary mapping tool names to parameters"
+                )
+
+            # Validate that all values in params_dict are dictionaries with string keys and values
+            for tool_name, params in params_dict.items():
+                if not isinstance(tool_name, str):
+                    raise TypeError("ground_truth parameters dictionary keys must be strings (tool names)")
+                if not isinstance(params, dict):
+                    raise TypeError(f"ground_truth parameters for tool '{tool_name}' must be a dictionary")
+                for k, v in params.items():
+                    if not isinstance(k, str):
+                        raise TypeError(f"ground_truth parameters for tool '{tool_name}' must have string keys")
+                    try:
+                        json.dumps(v)
+                    except (TypeError, ValueError):
+                        raise TypeError(
+                            f"ground_truth parameters for tool '{tool_name}' must have JSON-serializable values (got type {type(v)} for key '{k}')"
+                        )
+
+            ground_truth_names = [name.strip() for name in tool_names_list]
+            ground_truth_params_dict = params_dict
+            use_parameter_matching = True
+        elif isinstance(ground_truth, list) and all(isinstance(step, str) for step in ground_truth):
+            # List format: just tool names
+            ground_truth_names = [step.strip() for step in ground_truth]
+            use_parameter_matching = False
+        else:
+            raise TypeError(
+                "ground_truth must be a list of strings or a tuple of (list[str], dict[str, dict[str, str]])"
+            )
+
+        # Extract tool information from the response
+        agent_tool_pairs = self._extract_tool_names_and_params_from_response(response)
+
+        # Prepare steps for comparison
+        agent_steps, ground_truth_steps = self._prepare_steps_for_comparison(
+            agent_tool_pairs,
+            ground_truth_names,
+            ground_truth_params_dict,
+            use_parameter_matching,
+        )
+
+        # Calculate precision, recall, and F1 scores
+        additional_properties_metrics = self._calculate_precision_recall_f1_scores(agent_steps, ground_truth_steps)
+
+        # Convert metrics to floats, using nan for None values
+        for metric, score in additional_properties_metrics.items():
+            additional_properties_metrics[metric] = float(score) if score is not None else float("nan")
+
+        if self.matching_mode in self._TASK_NAVIGATION_EFFICIENCY_MATCHING_MODE_TO_FUNCTIONS:
+            # Calculate binary match metrics
+            match_result = self._TASK_NAVIGATION_EFFICIENCY_MATCHING_MODE_TO_FUNCTIONS[self.matching_mode](
+                self, agent_steps, ground_truth_steps
+            )
+
+            return {
+                "task_navigation_efficiency_label": match_result,
+                "task_navigation_efficiency_result": EVALUATION_PASS_FAIL_MAPPING[match_result],
+                "task_navigation_efficiency_details": additional_properties_metrics,
+            }
+        else:
+            raise EvaluationException(
+                f"Unsupported matching_mode '{self.matching_mode}'",
+                internal_message=str(self.matching_mode),
+                target=ErrorTarget.TASK_NAVIGATION_EFFICIENCY_EVALUATOR,
+                category=ErrorCategory.INVALID_VALUE,
+            )
+
+    @overload
+    def __call__(  # type: ignore
+        self, *, response: Union[str, List[Dict[str, Any]]], ground_truth: List[str]
+    ) -> Dict[str, Union[float, str, Dict[str, float]]]:
+        """
+        Evaluate the task navigation efficiency of an agent's action sequence.
+
+        :keyword response: The agent's response containing tool calls.
+        :paramtype response: Union[str, List[Dict[str, Any]]]
+        :keyword ground_truth: List of expected tool/action steps.
+        :paramtype ground_truth: List[str]
+        :return: The task navigation efficiency scores and results.
+        :rtype: Dict[str, Union[float, str, Dict[str, float]]]
+        """
+
+    @overload
+    def __call__(  # type: ignore
+        self,
+        *,
+        response: Union[str, List[Dict[str, Any]]],
+        ground_truth: Tuple[List[str], Dict[str, Dict[str, str]]],
+    ) -> Dict[str, Union[float, str, Dict[str, float]]]:
+        """
+        Evaluate the task navigation efficiency of an agent's action sequence with tool parameters.
+
+        :keyword response: The agent's response containing tool calls.
+        :paramtype response: Union[str, List[Dict[str, Any]]]
+        :keyword ground_truth: Tuple of (tool names list, parameters dict) where parameters must match exactly.
+        :paramtype ground_truth: Tuple[List[str], Dict[str, Dict[str, str]]]
+        :return: The task navigation efficiency scores and results.
+        :rtype: Dict[str, Union[float, str, Dict[str, float]]]
+        """
+
+    @override
+    def __call__(
+        self,
+        *args,
+        **kwargs,
+    ):
+        """
+        Evaluate task navigation efficiency.
+
+        :keyword response: The agent's response containing tool calls.
+        :paramtype response: Union[str, List[Dict[str, Any]]]
+        :keyword ground_truth: List of expected tool/action steps or tuple of (tool names, parameters dict).
+        :paramtype ground_truth: Union[List[str], Tuple[List[str], Dict[str, Dict[str, str]]]]
+        :return: The task navigation efficiency scores and results.
+        :rtype: Dict[str, Union[float, str, Dict[str, float]]]
+        """
+        return super().__call__(*args, **kwargs)
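The matching and scoring logic above is self-contained, so it can be sanity-checked without installing the package. Below is a minimal standalone sketch of the three matching modes and the Counter-based precision/recall/F1 arithmetic; the helper names (exact_match, in_order_match, any_order_match, prf1) are hypothetical and exist only for this illustration, and the real evaluator additionally extracts the step list from the agent response via its base class.

from collections import Counter
from typing import Dict, List

def exact_match(agent: List[str], truth: List[str]) -> bool:
    # Identical sequences: same steps, same order, no extras or omissions.
    return agent == truth

def in_order_match(agent: List[str], truth: List[str]) -> bool:
    # truth must be a subsequence of agent; the shared iterator enforces order.
    steps = iter(agent)
    return all(step in steps for step in truth)

def any_order_match(agent: List[str], truth: List[str]) -> bool:
    # Multiset containment: agent covers each required step's count.
    agent_counts, truth_counts = Counter(agent), Counter(truth)
    return all(agent_counts[s] >= c for s, c in truth_counts.items())

def prf1(agent: List[str], truth: List[str]) -> Dict[str, float]:
    # True positives are the multiset intersection, as in the evaluator;
    # tp + fp equals the total agent count and tp + fn the total truth count.
    tp = sum((Counter(agent) & Counter(truth)).values())
    precision = tp / len(agent) if agent else 0.0
    recall = tp / len(truth) if truth else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"precision_score": precision, "recall_score": recall, "f1_score": f1}

agent = ["identify_tools_to_call", "call_tool_A", "extra_logging", "call_tool_B"]
truth = ["identify_tools_to_call", "call_tool_A", "call_tool_B"]
print(exact_match(agent, truth))      # False: "extra_logging" breaks the exact match
print(in_order_match(agent, truth))   # True: truth is an ordered subsequence
print(any_order_match(agent, truth))  # True: every required count is covered
print(prf1(agent, truth))             # precision 0.75, recall 1.0, f1 ~0.857

Parameter matching works the same way once each step is canonicalized to (name, tuple(sorted(params.items()))), so argument dictionaries with the same key-value pairs compare equal regardless of key order.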
azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py
@@ -0,0 +1,9 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from ._tool_call_accuracy import ToolCallAccuracyEvaluator
+
+__all__ = [
+    "ToolCallAccuracyEvaluator",
+]
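This second hunk simply exposes ToolCallAccuracyEvaluator from the new _tool_call_accuracy subpackage. A typical construction, assuming the shared model_config pattern used by this package's prompt-based evaluators (endpoint, key, and deployment values below are placeholders):

from azure.ai.evaluation import ToolCallAccuracyEvaluator  # also re-exported at the top level

# Placeholder judge-model configuration; this evaluator is prompty-based
# and needs an LLM to grade tool calls.
model_config = {
    "azure_endpoint": "https://<your-resource>.openai.azure.com",
    "api_key": "<api-key>",
    "azure_deployment": "<deployment-name>",
}
tool_call_accuracy = ToolCallAccuracyEvaluator(model_config=model_config)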