azure-ai-evaluation 1.0.1__py3-none-any.whl → 1.13.5__py3-none-any.whl
This diff reflects the changes between two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
This version of azure-ai-evaluation might be problematic.
- azure/ai/evaluation/__init__.py +85 -14
- azure/ai/evaluation/_aoai/__init__.py +10 -0
- azure/ai/evaluation/_aoai/aoai_grader.py +140 -0
- azure/ai/evaluation/_aoai/label_grader.py +68 -0
- azure/ai/evaluation/_aoai/python_grader.py +86 -0
- azure/ai/evaluation/_aoai/score_model_grader.py +94 -0
- azure/ai/evaluation/_aoai/string_check_grader.py +66 -0
- azure/ai/evaluation/_aoai/text_similarity_grader.py +80 -0
- azure/ai/evaluation/_azure/__init__.py +3 -0
- azure/ai/evaluation/_azure/_clients.py +204 -0
- azure/ai/evaluation/_azure/_envs.py +207 -0
- azure/ai/evaluation/_azure/_models.py +227 -0
- azure/ai/evaluation/_azure/_token_manager.py +129 -0
- azure/ai/evaluation/_common/__init__.py +9 -1
- azure/ai/evaluation/_common/constants.py +124 -2
- azure/ai/evaluation/_common/evaluation_onedp_client.py +169 -0
- azure/ai/evaluation/_common/onedp/__init__.py +32 -0
- azure/ai/evaluation/_common/onedp/_client.py +166 -0
- azure/ai/evaluation/_common/onedp/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/_model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/_serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_types.py +21 -0
- azure/ai/evaluation/_common/onedp/_utils/__init__.py +6 -0
- azure/ai/evaluation/_common/onedp/_utils/model_base.py +1232 -0
- azure/ai/evaluation/_common/onedp/_utils/serialization.py +2032 -0
- azure/ai/evaluation/_common/onedp/_validation.py +66 -0
- azure/ai/evaluation/_common/onedp/_vendor.py +50 -0
- azure/ai/evaluation/_common/onedp/_version.py +9 -0
- azure/ai/evaluation/_common/onedp/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/onedp/aio/_client.py +168 -0
- azure/ai/evaluation/_common/onedp/aio/_configuration.py +72 -0
- azure/ai/evaluation/_common/onedp/aio/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/aio/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_operations.py +7143 -0
- azure/ai/evaluation/_common/onedp/aio/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/models/__init__.py +358 -0
- azure/ai/evaluation/_common/onedp/models/_enums.py +447 -0
- azure/ai/evaluation/_common/onedp/models/_models.py +5963 -0
- azure/ai/evaluation/_common/onedp/models/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/operations/__init__.py +49 -0
- azure/ai/evaluation/_common/onedp/operations/_operations.py +8951 -0
- azure/ai/evaluation/_common/onedp/operations/_patch.py +21 -0
- azure/ai/evaluation/_common/onedp/py.typed +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/__init__.py +1 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/__init__.py +22 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_operations.py +29 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/buildingblocks/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_operations.py +34 -0
- azure/ai/evaluation/_common/onedp/servicepatterns/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/rai_service.py +578 -69
- azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
- azure/ai/evaluation/_common/raiclient/_client.py +128 -0
- azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
- azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
- azure/ai/evaluation/_common/raiclient/_version.py +9 -0
- azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
- azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
- azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
- azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
- azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
- azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
- azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
- azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
- azure/ai/evaluation/_common/raiclient/operations/_operations.py +1238 -0
- azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
- azure/ai/evaluation/_common/raiclient/py.typed +1 -0
- azure/ai/evaluation/_common/utils.py +505 -27
- azure/ai/evaluation/_constants.py +147 -0
- azure/ai/evaluation/_converters/__init__.py +3 -0
- azure/ai/evaluation/_converters/_ai_services.py +899 -0
- azure/ai/evaluation/_converters/_models.py +467 -0
- azure/ai/evaluation/_converters/_sk_services.py +495 -0
- azure/ai/evaluation/_eval_mapping.py +87 -0
- azure/ai/evaluation/_evaluate/_batch_run/__init__.py +10 -2
- azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +176 -0
- azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
- azure/ai/evaluation/_evaluate/_batch_run/code_client.py +18 -12
- azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +19 -6
- azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +47 -22
- azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +18 -2
- azure/ai/evaluation/_evaluate/_eval_run.py +32 -46
- azure/ai/evaluation/_evaluate/_evaluate.py +1809 -142
- azure/ai/evaluation/_evaluate/_evaluate_aoai.py +992 -0
- azure/ai/evaluation/_evaluate/_telemetry/__init__.py +5 -90
- azure/ai/evaluation/_evaluate/_utils.py +237 -42
- azure/ai/evaluation/_evaluator_definition.py +76 -0
- azure/ai/evaluation/_evaluators/_bleu/_bleu.py +80 -28
- azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +119 -0
- azure/ai/evaluation/_evaluators/_coherence/_coherence.py +40 -4
- azure/ai/evaluation/_evaluators/_common/__init__.py +2 -0
- azure/ai/evaluation/_evaluators/_common/_base_eval.py +430 -29
- azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +63 -0
- azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +269 -12
- azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +74 -9
- azure/ai/evaluation/_evaluators/_common/_conversation_aggregators.py +49 -0
- azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +73 -53
- azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +35 -5
- azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +26 -5
- azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +35 -5
- azure/ai/evaluation/_evaluators/_content_safety/_violence.py +34 -4
- azure/ai/evaluation/_evaluators/_document_retrieval/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_document_retrieval/_document_retrieval.py +442 -0
- azure/ai/evaluation/_evaluators/_eci/_eci.py +6 -3
- azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +97 -70
- azure/ai/evaluation/_evaluators/_fluency/_fluency.py +39 -3
- azure/ai/evaluation/_evaluators/_gleu/_gleu.py +80 -25
- azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +230 -20
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +30 -29
- azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +19 -14
- azure/ai/evaluation/_evaluators/_intent_resolution/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +196 -0
- azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +275 -0
- azure/ai/evaluation/_evaluators/_meteor/_meteor.py +89 -36
- azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +22 -4
- azure/ai/evaluation/_evaluators/_qa/_qa.py +94 -35
- azure/ai/evaluation/_evaluators/_relevance/_relevance.py +100 -4
- azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +154 -56
- azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +202 -0
- azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +84 -0
- azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +39 -3
- azure/ai/evaluation/_evaluators/_rouge/_rouge.py +166 -26
- azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +38 -7
- azure/ai/evaluation/_evaluators/_similarity/_similarity.py +81 -85
- azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +226 -0
- azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +101 -0
- azure/ai/evaluation/_evaluators/_task_completion/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_completion/_task_completion.py +177 -0
- azure/ai/evaluation/_evaluators/_task_completion/task_completion.prompty +220 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_task_navigation_efficiency/_task_navigation_efficiency.py +384 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +298 -0
- azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +166 -0
- azure/ai/evaluation/_evaluators/_tool_call_success/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_call_success/_tool_call_success.py +306 -0
- azure/ai/evaluation/_evaluators/_tool_call_success/tool_call_success.prompty +321 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/_tool_input_accuracy.py +263 -0
- azure/ai/evaluation/_evaluators/_tool_input_accuracy/tool_input_accuracy.prompty +76 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/__init__.py +7 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/_tool_output_utilization.py +225 -0
- azure/ai/evaluation/_evaluators/_tool_output_utilization/tool_output_utilization.prompty +221 -0
- azure/ai/evaluation/_evaluators/_tool_selection/__init__.py +9 -0
- azure/ai/evaluation/_evaluators/_tool_selection/_tool_selection.py +266 -0
- azure/ai/evaluation/_evaluators/_tool_selection/tool_selection.prompty +104 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
- azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +102 -0
- azure/ai/evaluation/_evaluators/_xpia/xpia.py +20 -4
- azure/ai/evaluation/_exceptions.py +24 -1
- azure/ai/evaluation/_http_utils.py +7 -5
- azure/ai/evaluation/_legacy/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_adapters/__init__.py +7 -0
- azure/ai/evaluation/_legacy/_adapters/_check.py +17 -0
- azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
- azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
- azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
- azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
- azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
- azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
- azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
- azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
- azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
- azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
- azure/ai/evaluation/_legacy/_batch_engine/_config.py +48 -0
- azure/ai/evaluation/_legacy/_batch_engine/_engine.py +477 -0
- azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
- azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +132 -0
- azure/ai/evaluation/_legacy/_batch_engine/_result.py +107 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run.py +127 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
- azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +262 -0
- azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
- azure/ai/evaluation/_legacy/_batch_engine/_trace.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils.py +97 -0
- azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
- azure/ai/evaluation/_legacy/_common/__init__.py +3 -0
- azure/ai/evaluation/_legacy/_common/_async_token_provider.py +117 -0
- azure/ai/evaluation/_legacy/_common/_logging.py +292 -0
- azure/ai/evaluation/_legacy/_common/_thread_pool_executor_with_context.py +17 -0
- azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
- azure/ai/evaluation/_legacy/prompty/_connection.py +119 -0
- azure/ai/evaluation/_legacy/prompty/_exceptions.py +139 -0
- azure/ai/evaluation/_legacy/prompty/_prompty.py +430 -0
- azure/ai/evaluation/_legacy/prompty/_utils.py +663 -0
- azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
- azure/ai/evaluation/_model_configurations.py +26 -0
- azure/ai/evaluation/_safety_evaluation/__init__.py +3 -0
- azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
- azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +917 -0
- azure/ai/evaluation/_user_agent.py +32 -1
- azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -4
- azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -4
- azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -4
- azure/ai/evaluation/_version.py +2 -1
- azure/ai/evaluation/red_team/__init__.py +22 -0
- azure/ai/evaluation/red_team/_agent/__init__.py +3 -0
- azure/ai/evaluation/red_team/_agent/_agent_functions.py +261 -0
- azure/ai/evaluation/red_team/_agent/_agent_tools.py +461 -0
- azure/ai/evaluation/red_team/_agent/_agent_utils.py +89 -0
- azure/ai/evaluation/red_team/_agent/_semantic_kernel_plugin.py +228 -0
- azure/ai/evaluation/red_team/_attack_objective_generator.py +268 -0
- azure/ai/evaluation/red_team/_attack_strategy.py +49 -0
- azure/ai/evaluation/red_team/_callback_chat_target.py +115 -0
- azure/ai/evaluation/red_team/_default_converter.py +21 -0
- azure/ai/evaluation/red_team/_evaluation_processor.py +505 -0
- azure/ai/evaluation/red_team/_mlflow_integration.py +430 -0
- azure/ai/evaluation/red_team/_orchestrator_manager.py +803 -0
- azure/ai/evaluation/red_team/_red_team.py +1717 -0
- azure/ai/evaluation/red_team/_red_team_result.py +661 -0
- azure/ai/evaluation/red_team/_result_processor.py +1708 -0
- azure/ai/evaluation/red_team/_utils/__init__.py +37 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_eval_chat_target.py +128 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_target.py +601 -0
- azure/ai/evaluation/red_team/_utils/_rai_service_true_false_scorer.py +114 -0
- azure/ai/evaluation/red_team/_utils/constants.py +72 -0
- azure/ai/evaluation/red_team/_utils/exception_utils.py +345 -0
- azure/ai/evaluation/red_team/_utils/file_utils.py +266 -0
- azure/ai/evaluation/red_team/_utils/formatting_utils.py +365 -0
- azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
- azure/ai/evaluation/red_team/_utils/metric_mapping.py +73 -0
- azure/ai/evaluation/red_team/_utils/objective_utils.py +46 -0
- azure/ai/evaluation/red_team/_utils/progress_utils.py +252 -0
- azure/ai/evaluation/red_team/_utils/retry_utils.py +218 -0
- azure/ai/evaluation/red_team/_utils/strategy_utils.py +218 -0
- azure/ai/evaluation/simulator/_adversarial_scenario.py +6 -0
- azure/ai/evaluation/simulator/_adversarial_simulator.py +187 -80
- azure/ai/evaluation/simulator/_constants.py +1 -0
- azure/ai/evaluation/simulator/_conversation/__init__.py +138 -11
- azure/ai/evaluation/simulator/_conversation/_conversation.py +6 -2
- azure/ai/evaluation/simulator/_conversation/constants.py +1 -1
- azure/ai/evaluation/simulator/_direct_attack_simulator.py +37 -24
- azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +1 -0
- azure/ai/evaluation/simulator/_indirect_attack_simulator.py +56 -28
- azure/ai/evaluation/simulator/_model_tools/__init__.py +2 -1
- azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +225 -0
- azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +12 -10
- azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +100 -45
- azure/ai/evaluation/simulator/_model_tools/_rai_client.py +101 -3
- azure/ai/evaluation/simulator/_model_tools/_template_handler.py +31 -11
- azure/ai/evaluation/simulator/_model_tools/models.py +20 -17
- azure/ai/evaluation/simulator/_simulator.py +43 -19
- {azure_ai_evaluation-1.0.1.dist-info → azure_ai_evaluation-1.13.5.dist-info}/METADATA +378 -27
- azure_ai_evaluation-1.13.5.dist-info/RECORD +305 -0
- {azure_ai_evaluation-1.0.1.dist-info → azure_ai_evaluation-1.13.5.dist-info}/WHEEL +1 -1
- azure/ai/evaluation/_evaluators/_multimodal/__init__.py +0 -20
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +0 -132
- azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal_base.py +0 -55
- azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +0 -124
- azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +0 -100
- azure/ai/evaluation/_evaluators/_multimodal/_violence.py +0 -100
- azure/ai/evaluation/simulator/_tracing.py +0 -89
- azure_ai_evaluation-1.0.1.dist-info/RECORD +0 -119
- {azure_ai_evaluation-1.0.1.dist-info → azure_ai_evaluation-1.13.5.dist-info/licenses}/NOTICE.txt +0 -0
- {azure_ai_evaluation-1.0.1.dist-info → azure_ai_evaluation-1.13.5.dist-info}/top_level.txt +0 -0
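
Only a few of the changed files are rendered below. Judging by the file list and change counts above, the rendered hunks correspond to azure/ai/evaluation/_evaluate/_telemetry/__init__.py (+5 -90), azure/ai/evaluation/_evaluate/_utils.py (+237 -42), and the new azure/ai/evaluation/_evaluator_definition.py (+76 -0). Removed lines whose content the diff viewer truncated are marked with `…`.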
azure/ai/evaluation/_evaluate/_telemetry/__init__.py

@@ -9,16 +9,14 @@ import logging
 from typing import Callable, Dict, Literal, Optional, Union, cast

 import pandas as pd
-from …
-from …
-from …
-from …
-from promptflow.core import Prompty as prompty_core
+from azure.ai.evaluation._legacy._adapters._flows import FlexFlow as flex_flow
+from azure.ai.evaluation._legacy._adapters._flows import AsyncPrompty as prompty_sdk
+from azure.ai.evaluation._legacy._adapters._flows import Flow as dag_flow
+from azure.ai.evaluation._legacy._adapters.client import PFClient
 from typing_extensions import ParamSpec

 from azure.ai.evaluation._model_configurations import AzureAIProject, EvaluationResult

-from ..._user_agent import USER_AGENT
 from .._utils import _trace_destination_from_project_scope

 LOGGER = logging.getLogger(__name__)

@@ -66,7 +64,7 @@ def _get_evaluator_properties(evaluator, evaluator_name):

     try:
         # Cover flex flow and prompty based evaluator
-        if isinstance(evaluator, (prompty_sdk, …
+        if isinstance(evaluator, (prompty_sdk, flex_flow)):
             name = evaluator.name
             pf_type = evaluator.__class__.__name__
         # Cover dag flow based evaluator

@@ -94,86 +92,3 @@ def _get_evaluator_properties(evaluator, evaluator_name):
         "type": _get_evaluator_type(evaluator),
         "alias": evaluator_name if evaluator_name else "",
     }
-
-
-# cspell:ignore isna
-def log_evaluate_activity(func: Callable[P, EvaluationResult]) -> Callable[P, EvaluationResult]:
-    """Decorator to log evaluate activity
-
-    :param func: The function to be decorated
-    :type func: Callable
-    :returns: The decorated function
-    :rtype: Callable[P, EvaluationResult]
-    """
-
-    @functools.wraps(func)
-    def wrapper(*args: P.args, **kwargs: P.kwargs) -> EvaluationResult:
-        from promptflow._sdk._telemetry import ActivityType, log_activity
-        from promptflow._sdk._telemetry.telemetry import get_telemetry_logger
-
-        evaluators = cast(Optional[Dict[str, Callable]], kwargs.get("evaluators", {})) or {}
-        azure_ai_project = cast(Optional[AzureAIProject], kwargs.get("azure_ai_project", None))
-
-        pf_client = PFClient(
-            config=(
-                {"trace.destination": _trace_destination_from_project_scope(azure_ai_project)}
-                if azure_ai_project
-                else None
-            ),
-            user_agent=USER_AGENT,
-        )
-
-        trace_destination = pf_client._config.get_trace_destination()  # pylint: disable=protected-access
-        track_in_cloud = bool(trace_destination) if trace_destination != "none" else False
-        evaluate_target = bool(kwargs.get("target", None))
-        evaluator_config = bool(kwargs.get("evaluator_config", None))
-        custom_dimensions: Dict[str, Union[str, bool]] = {
-            "track_in_cloud": track_in_cloud,
-            "evaluate_target": evaluate_target,
-            "evaluator_config": evaluator_config,
-        }
-
-        with log_activity(
-            get_telemetry_logger(),
-            "pf.evals.evaluate",
-            activity_type=ActivityType.PUBLICAPI,
-            user_agent=USER_AGENT,
-            custom_dimensions=custom_dimensions,
-        ):
-            result = func(*args, **kwargs)
-
-            try:
-                evaluators_info = []
-                for evaluator_name, evaluator in evaluators.items():
-                    evaluator_info = _get_evaluator_properties(evaluator, evaluator_name)
-                    try:
-                        evaluator_df = pd.DataFrame(result.get("rows", [])).filter(
-                            like=f"outputs.{evaluator_name}", axis=1
-                        )
-
-                        failed_rows = (
-                            evaluator_df.shape[0] if evaluator_df.empty else int(evaluator_df.isna().any(axis=1).sum())
-                        )
-                        total_rows = evaluator_df.shape[0]
-
-                        evaluator_info["failed_rows"] = failed_rows
-                        evaluator_info["total_rows"] = total_rows
-                    except Exception as e:  # pylint: disable=broad-exception-caught
-                        LOGGER.debug("Failed to collect evaluate failed row info for %s: %s", evaluator_name, e)
-                    evaluators_info.append(evaluator_info)
-
-                custom_dimensions = {"evaluators_info": json.dumps(evaluators_info)}
-                with log_activity(
-                    get_telemetry_logger(),
-                    "pf.evals.evaluate_usage_info",
-                    activity_type=ActivityType.PUBLICAPI,
-                    user_agent=USER_AGENT,
-                    custom_dimensions=custom_dimensions,
-                ):
-                    pass
-            except Exception as e:  # pylint: disable=broad-exception-caught
-                LOGGER.debug("Failed to collect evaluate usage info: %s", e)
-
-            return result
-
-    return wrapper
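
The telemetry module now pulls its promptflow-facing types through the package's internal `_legacy._adapters` layer instead of importing promptflow directly, and the `log_evaluate_activity` decorator is removed outright. A minimal sketch of how the updated evaluator-type check reads (it only mirrors the isinstance line from the hunk above, not the full `_get_evaluator_properties` function; the helper name is made up):

```python
# Sketch only: reproduces the updated isinstance check, not the surrounding function.
from azure.ai.evaluation._legacy._adapters._flows import AsyncPrompty as prompty_sdk
from azure.ai.evaluation._legacy._adapters._flows import FlexFlow as flex_flow


def _is_prompty_or_flex_flow(evaluator) -> bool:
    # Covers flex-flow and prompty based evaluators, as in the new check.
    return isinstance(evaluator, (prompty_sdk, flex_flow))
```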
azure/ai/evaluation/_evaluate/_utils.py

@@ -7,13 +7,17 @@ import os
 import re
 import tempfile
 from pathlib import Path
-…
+import time
+from typing import Any, Dict, List, NamedTuple, Optional, Union, cast
 import uuid
 import base64
+import math

 import pandas as pd
-from …
-
+from tqdm import tqdm
+
+from azure.core.pipeline.policies import UserAgentPolicy
+from azure.ai.evaluation._legacy._adapters.entities import Run

 from azure.ai.evaluation._constants import (
     DEFAULT_EVALUATION_RESULTS_FILE_NAME,
@@ -22,7 +26,10 @@ from azure.ai.evaluation._constants import (
     Prefixes,
 )
 from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
-from azure.ai.evaluation._model_configurations import AzureAIProject
+from azure.ai.evaluation._model_configurations import AzureAIProject, EvaluationResult
+from azure.ai.evaluation._version import VERSION
+from azure.ai.evaluation._user_agent import UserAgentSingleton
+from azure.ai.evaluation._azure._clients import LiteMLClient

 LOGGER = logging.getLogger(__name__)

@@ -45,6 +52,8 @@ def is_none(value) -> bool:
 def extract_workspace_triad_from_trace_provider(  # pylint: disable=name-too-long
     trace_provider: str,
 ) -> AzureMLWorkspace:
+    from azure.ai.evaluation._legacy._adapters.utils import get_workspace_triad_from_local
+
     match = re.match(AZURE_WORKSPACE_REGEX_FORMAT, trace_provider)
     if not match or len(match.groups()) != 5:
         raise EvaluationException(
@@ -58,10 +67,20 @@ def extract_workspace_triad_from_trace_provider(  # pylint: disable=name-too-long
             category=ErrorCategory.INVALID_VALUE,
             blame=ErrorBlame.UNKNOWN,
         )
+
     subscription_id = match.group(1)
     resource_group_name = match.group(3)
     workspace_name = match.group(5)
-…
+
+    # In theory this if statement should never evaluate to True, but we'll keep it here just in case
+    # for backwards compatibility with what the original code that depended on promptflow-azure did
+    if not (subscription_id and resource_group_name and workspace_name):
+        local = get_workspace_triad_from_local()
+        subscription_id = subscription_id or local.subscription_id or os.getenv("AZUREML_ARM_SUBSCRIPTION")
+        resource_group_name = resource_group_name or local.resource_group_name or os.getenv("AZUREML_ARM_RESOURCEGROUP")
+        workspace_name = workspace_name or local.workspace_name or os.getenv("AZUREML_ARM_WORKSPACE_NAME")
+
+    return AzureMLWorkspace(subscription_id or "", resource_group_name or "", workspace_name or "")


 def load_jsonl(path):
@@ -69,19 +88,6 @@ def load_jsonl(path):
         return [json.loads(line) for line in f.readlines()]


-def _azure_pf_client_and_triad(trace_destination) -> Tuple[PFClient, AzureMLWorkspace]:
-    from promptflow.azure._cli._utils import _get_azure_pf_client
-
-    ws_triad = extract_workspace_triad_from_trace_provider(trace_destination)
-    azure_pf_client = _get_azure_pf_client(
-        subscription_id=ws_triad.subscription_id,
-        resource_group=ws_triad.resource_group_name,
-        workspace_name=ws_triad.workspace_name,
-    )
-
-    return azure_pf_client, ws_triad
-
-
 def _store_multimodal_content(messages, tmpdir: str):
     # verify if images folder exists
     images_folder_path = os.path.join(tmpdir, "images")
@@ -91,31 +97,135 @@ def _store_multimodal_content(messages, tmpdir: str):
     for message in messages:
         if isinstance(message.get("content", []), list):
             for content in message.get("content", []):
-…
-…
-…
-…
-…
+                process_message_content(content, images_folder_path)
+
+
+def process_message_content(content, images_folder_path):
+    if content.get("type", "") == "image_url":
+        image_url = content.get("image_url")
+
+        if not image_url or "url" not in image_url:
+            return None

-…
-…
-…
+        url = image_url["url"]
+        if not url.startswith("data:image/"):
+            return None

-…
-…
+        match = re.search("data:image/([^;]+);", url)
+        if not match:
+            return None
+
+        ext = match.group(1)
+        # Extract the base64 string
+        base64image = image_url["url"].replace(f"data:image/{ext};base64,", "")
+
+        # Generate a unique filename
+        image_file_name = f"{str(uuid.uuid4())}.{ext}"
+        image_url["url"] = f"images/{image_file_name}"  # Replace the base64 URL with the file path
+
+        # Decode the base64 string to binary image data
+        image_data_binary = base64.b64decode(base64image)
+
+        # Write the binary image data to the file
+        image_file_path = os.path.join(images_folder_path, image_file_name)
+        with open(image_file_path, "wb") as f:
+            f.write(image_data_binary)
+    return None
+
+
+def _log_metrics_and_instance_results_onedp(
+    metrics: Dict[str, Any],
+    instance_results: pd.DataFrame,
+    project_url: str,
+    evaluation_name: Optional[str],
+    name_map: Dict[str, str],
+    tags: Optional[Dict[str, str]] = None,
+    **kwargs,
+) -> Optional[str]:
+
+    # One RP Client
+    from azure.ai.evaluation._azure._token_manager import AzureMLTokenManager
+    from azure.ai.evaluation._constants import TokenScope
+    from azure.ai.evaluation._common import EvaluationServiceOneDPClient, EvaluationUpload
+
+    credentials = AzureMLTokenManager(
+        TokenScope.COGNITIVE_SERVICES_MANAGEMENT.value, LOGGER, credential=kwargs.get("credential")
+    )
+    client = EvaluationServiceOneDPClient(
+        endpoint=project_url,
+        credential=credentials,
+        user_agent_policy=UserAgentPolicy(base_user_agent=UserAgentSingleton().value),
+    )
+
+    # Massaging before artifacts are put on disk
+    # Adding line_number as index column this is needed by UI to form link to individual instance run
+    instance_results["line_number"] = instance_results.index.values

-…
-…
-…
-…
+    artifact_name = "instance_results.jsonl"
+
+    with tempfile.TemporaryDirectory() as tmpdir:
+        # storing multi_modal images if exists
+        col_name = "inputs.conversation"
+        if col_name in instance_results.columns:
+            for item in instance_results[col_name].items():
+                value = item[1]
+                if "messages" in value:
+                    _store_multimodal_content(value["messages"], tmpdir)
+
+        # storing artifact result
+        tmp_path = os.path.join(tmpdir, artifact_name)
+
+        with open(tmp_path, "w", encoding=DefaultOpenEncoding.WRITE) as f:
+            f.write(instance_results.to_json(orient="records", lines=True))
+
+        properties = {
+            EvaluationRunProperties.RUN_TYPE: "eval_run",
+            EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
+            "_azureml.evaluate_artifacts": json.dumps([{"path": artifact_name, "type": "table"}]),
+        }
+        properties.update(_convert_name_map_into_property_entries(name_map))
+
+        create_evaluation_result_response = client.create_evaluation_result(
+            name=uuid.uuid4(), path=tmpdir, metrics=metrics
+        )
+
+        upload_run_response = client.start_evaluation_run(
+            evaluation=EvaluationUpload(
+                display_name=evaluation_name,
+                properties=properties,
+                tags=tags,
+            )
+        )
+
+        # TODO: type mis-match because Evaluation instance is assigned to EvaluationRun
+        evaluation_id = (
+            upload_run_response.name  # type: ignore[attr-defined]
+            if hasattr(upload_run_response, "name")
+            else upload_run_response.id
+        )
+        update_run_response = client.update_evaluation_run(
+            name=evaluation_id,
+            evaluation=EvaluationUpload(
+                display_name=evaluation_name,
+                status="Completed",
+                outputs={
+                    "evaluationResultId": create_evaluation_result_response.id,
+                },
+            ),
+        )
+
+        return update_run_response.properties.get("AiStudioEvaluationUri")


 def _log_metrics_and_instance_results(
     metrics: Dict[str, Any],
     instance_results: pd.DataFrame,
     trace_destination: Optional[str],
-    run: Run,
+    run: Optional[Run],
     evaluation_name: Optional[str],
+    name_map: Dict[str, str],
+    tags: Optional[Dict[str, str]] = None,
+    **kwargs,
 ) -> Optional[str]:
     from azure.ai.evaluation._evaluate._eval_run import EvalRun

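
The new `_log_metrics_and_instance_results_onedp` path serializes the per-row results to an `instance_results.jsonl` artifact before uploading. A self-contained sketch of just that serialization step, with made-up column values:

```python
# Illustrative only: mirrors the line_number + to_json(..., lines=True) step above.
import pandas as pd

instance_results = pd.DataFrame(
    [{"outputs.relevance.score": 4}, {"outputs.relevance.score": 5}]
)
# line_number is what lets the UI link each JSON line back to an individual instance run
instance_results["line_number"] = instance_results.index.values
print(instance_results.to_json(orient="records", lines=True))
# {"outputs.relevance.score":4,"line_number":0}
# {"outputs.relevance.score":5,"line_number":1}
```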
@@ -123,20 +233,28 @@ def _log_metrics_and_instance_results(
         LOGGER.debug("Skip uploading evaluation results to AI Studio since no trace destination was provided.")
         return None

-…
-…
+    ws_triad = extract_workspace_triad_from_trace_provider(trace_destination)
+    management_client = LiteMLClient(
+        subscription_id=ws_triad.subscription_id,
+        resource_group=ws_triad.resource_group_name,
+        logger=LOGGER,
+        credential=kwargs.get("credential"),
+        # let the client automatically determine the credentials to use
+    )
+    tracking_uri = management_client.workspace_get_info(ws_triad.workspace_name).ml_flow_tracking_uri

     # Adding line_number as index column this is needed by UI to form link to individual instance run
     instance_results["line_number"] = instance_results.index.values

     with EvalRun(
         run_name=run.name if run is not None else evaluation_name,
-        tracking_uri=tracking_uri,
+        tracking_uri=cast(str, tracking_uri),
         subscription_id=ws_triad.subscription_id,
         group_name=ws_triad.resource_group_name,
         workspace_name=ws_triad.workspace_name,
-…
+        management_client=management_client,
         promptflow_run=run,
+        tags=tags,
     ) as ev_run:
         artifact_name = EvalRun.EVALUATION_ARTIFACT

@@ -162,11 +280,18 @@ def _log_metrics_and_instance_results(
         # adding these properties to avoid showing traces if a dummy run is created.
         # We are doing that only for the pure evaluation runs.
         if run is None:
+            properties = {
+                EvaluationRunProperties.RUN_TYPE: "eval_run",
+                EvaluationRunProperties.EVALUATION_RUN: "promptflow.BatchRun",
+                EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
+                "_azureml.evaluate_artifacts": json.dumps([{"path": artifact_name, "type": "table"}]),
+            }
+            properties.update(_convert_name_map_into_property_entries(name_map))
+            ev_run.write_properties_to_run_history(properties=properties)
+        else:
             ev_run.write_properties_to_run_history(
                 properties={
-                    EvaluationRunProperties.…
-                    EvaluationRunProperties.EVALUATION_RUN: "promptflow.BatchRun",
-                    "_azureml.evaluate_artifacts": json.dumps([{"path": artifact_name, "type": "table"}]),
+                    EvaluationRunProperties.EVALUATION_SDK: f"azure-ai-evaluation:{VERSION}",
                 }
             )

@@ -209,9 +334,14 @@ def _write_output(path: Union[str, os.PathLike], data_dict: Any) -> None:
         p = p / DEFAULT_EVALUATION_RESULTS_FILE_NAME

     with open(p, "w", encoding=DefaultOpenEncoding.WRITE) as f:
-        json.dump(data_dict, f)
+        json.dump(data_dict, f, ensure_ascii=False)

-…
+    # Use tqdm.write to print message without interfering with any current progress bar
+    # Fall back to regular print if tqdm.write fails (e.g., when progress bar is closed)
+    try:
+        tqdm.write(f'Evaluation results saved to "{p.resolve()}".\n')
+    except Exception:
+        print(f'Evaluation results saved to "{p.resolve()}".\n')


 def _apply_column_mapping(
@@ -296,3 +426,68 @@ def set_event_loop_policy() -> None:
     # Reference: https://stackoverflow.com/questions/45600579/asyncio-event-loop-is-closed-when-getting-loop
     # On Windows seems to be a problem with EventLoopPolicy, use this snippet to work around it
     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())  # type: ignore[attr-defined]
+
+
+# textwrap.wrap tries to do fancy nonsense that we don't want
+def _wrap(s, w):
+    return [s[i : i + w] for i in range(0, len(s), w)]
+
+
+def _convert_name_map_into_property_entries(
+    name_map: Dict[str, str], segment_length: int = 950, max_segments: int = 10
+) -> Dict[str, Any]:
+    """
+    Convert the name map into property entries.
+
+    :param name_map: The name map to be converted.
+    :type name_map: Dict[str, str]
+    :param segment_length: The max length of each individual segment,
+        which will each have their own dictionary entry
+    :type segment_length: str
+    :param max_segments: The max number of segments we can have. If the stringified
+        name map is too long, we just return a length entry with a value
+        of -1 to indicate that the map was too long.
+    :type max_segments: str
+    :return: The converted name map.
+    :rtype: Dict[str, Any]
+    """
+    name_map_string = json.dumps(name_map)
+    num_segments = math.ceil(len(name_map_string) / segment_length)
+    # Property map is somehow still too long to encode within the space
+    # we allow, so give up, but make sure the service knows we gave up
+    if num_segments > max_segments:
+        return {EvaluationRunProperties.NAME_MAP_LENGTH: -1}
+
+    result: Dict[str, Any] = {EvaluationRunProperties.NAME_MAP_LENGTH: num_segments}
+    segments_list = _wrap(name_map_string, segment_length)
+    for i in range(0, num_segments):
+        segment_key = f"{EvaluationRunProperties.NAME_MAP}_{i}"
+        result[segment_key] = segments_list[i]
+    return result
+
+
+class JSONLDataFileLoader:
+    def __init__(self, filename: Union[os.PathLike, str]):
+        self.filename = filename
+
+    def load(self) -> pd.DataFrame:
+        return pd.read_json(self.filename, lines=True, dtype=object)
+
+
+class CSVDataFileLoader:
+    def __init__(self, filename: Union[os.PathLike, str]):
+        self.filename = filename
+
+    def load(self) -> pd.DataFrame:
+        return pd.read_csv(self.filename, dtype=str)
+
+
+class DataLoaderFactory:
+    @staticmethod
+    def get_loader(filename: Union[os.PathLike, str]) -> Union[JSONLDataFileLoader, CSVDataFileLoader]:
+        filename_str = str(filename).lower()
+        if filename_str.endswith(".csv"):
+            return CSVDataFileLoader(filename)
+
+        # fallback to JSONL to maintain backward compatibility
+        return JSONLDataFileLoader(filename)
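
For orientation, a minimal usage sketch of the data loaders added above; the file name is hypothetical, and the import path assumes the _evaluate/_utils.py attribution noted earlier. `get_loader` picks the CSV loader by extension and otherwise falls back to JSONL:

```python
# Hypothetical file name; module path assumed from the file list above.
from azure.ai.evaluation._evaluate._utils import DataLoaderFactory

loader = DataLoaderFactory.get_loader("eval_data.csv")  # -> CSVDataFileLoader
df = loader.load()  # pandas DataFrame, every column read as str
```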
azure/ai/evaluation/_evaluator_definition.py (new file)

@@ -0,0 +1,76 @@
+from abc import ABC
+from typing import Dict, List, Optional, Any
+from dataclasses import dataclass, field
+
+
+@dataclass
+class EvaluatorMetric:
+    type: str = "ordinal"
+    desirable_direction: Optional[str] = None
+    min_value: Optional[float] = None
+    max_value: Optional[float] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        result = {"type": self.type}
+        if self.desirable_direction is not None:
+            result["desirable_direction"] = self.desirable_direction
+        if self.min_value is not None:
+            result["min_value"] = self.min_value
+        if self.max_value is not None:
+            result["max_value"] = self.max_value
+        return result
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "EvaluatorMetric":
+        return cls(
+            type=data.get("type", "ordinal"),
+            desirable_direction=data.get("desirable_direction"),
+            min_value=data.get("min_value"),
+            max_value=data.get("max_value"),
+        )
+
+
+@dataclass
+class ObjectParameterDescriptorWithRequired:
+    required: List[str] = field(default_factory=list)
+    type: str = "object"
+    properties: Dict[str, Any] = field(default_factory=dict)
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {"required": self.required, "type": self.type, "properties": self.properties}
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "ObjectParameterDescriptorWithRequired":
+        return cls(
+            required=data.get("required", []), type=data.get("type", "object"), properties=data.get("properties", {})
+        )
+
+
+class EvaluatorDefinition(ABC):
+    """Base class for evaluator definitions"""
+
+    def __init__(self):
+        self.init_parameters: ObjectParameterDescriptorWithRequired = ObjectParameterDescriptorWithRequired()
+        self.metrics: Dict[str, EvaluatorMetric] = {}
+        self.data_schema: ObjectParameterDescriptorWithRequired = ObjectParameterDescriptorWithRequired()
+        self.type: str = "unknown"
+
+    def to_dict(self) -> Dict[str, Any]:
+        result = {
+            "type": self.type,
+            "init_parameters": self.init_parameters.to_dict(),
+            "metrics": {k: v.to_dict() for k, v in self.metrics.items()},
+            "data_schema": self.data_schema.to_dict(),
+        }
+        return result
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "EvaluatorDefinition":
+        # Create a generic instance since specific subclasses are not defined
+        instance = cls.__new__(cls)
+        instance.__init__()
+
+        instance.init_parameters = ObjectParameterDescriptorWithRequired.from_dict(data.get("init_parameters", {}))
+        instance.metrics = {k: EvaluatorMetric.from_dict(v) for k, v in data.get("metrics", {}).items()}
+        instance.data_schema = ObjectParameterDescriptorWithRequired.from_dict(data.get("data_schema", {}))
+        return instance
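
A short round trip through the new dataclasses, with illustrative values; the import path is taken from the file list above. `EvaluatorMetric` defaults to an ordinal metric and only emits the optional fields that are actually set:

```python
# Illustrative values; EvaluatorMetric is the dataclass defined in the new file above.
from azure.ai.evaluation._evaluator_definition import EvaluatorMetric

metric = EvaluatorMetric(
    type="continuous",
    desirable_direction="increase",
    min_value=0.0,
    max_value=1.0,
)
payload = metric.to_dict()
# {'type': 'continuous', 'desirable_direction': 'increase', 'min_value': 0.0, 'max_value': 1.0}
assert EvaluatorMetric.from_dict(payload) == metric
```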