azure-ai-evaluation 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

Files changed (53)
  1. azure/ai/evaluation/__init__.py +0 -16
  2. azure/ai/evaluation/_common/rai_service.py +1 -1
  3. azure/ai/evaluation/_common/utils.py +1 -1
  4. azure/ai/evaluation/_converters/__init__.py +1 -1
  5. azure/ai/evaluation/_converters/_ai_services.py +4 -4
  6. azure/ai/evaluation/_evaluate/_batch_run/code_client.py +18 -12
  7. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +9 -4
  8. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +42 -22
  9. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +1 -1
  10. azure/ai/evaluation/_evaluate/_eval_run.py +1 -1
  11. azure/ai/evaluation/_evaluate/_evaluate.py +84 -68
  12. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +5 -89
  13. azure/ai/evaluation/_evaluate/_utils.py +3 -3
  14. azure/ai/evaluation/_evaluators/_common/_base_eval.py +1 -1
  15. azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +1 -1
  16. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +1 -1
  17. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +1 -1
  18. azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +1 -0
  19. azure/ai/evaluation/_legacy/_adapters/__init__.py +21 -0
  20. azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
  21. azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
  22. azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
  23. azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
  24. azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
  25. azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
  26. azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
  27. azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
  28. azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
  29. azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
  30. azure/ai/evaluation/_legacy/_batch_engine/_result.py +1 -1
  31. azure/ai/evaluation/_legacy/_batch_engine/_status.py +1 -1
  32. azure/ai/evaluation/_version.py +1 -1
  33. azure/ai/evaluation/red_team/__init__.py +19 -0
  34. azure/ai/evaluation/{_red_team → red_team}/_attack_objective_generator.py +3 -0
  35. azure/ai/evaluation/{_red_team → red_team}/_attack_strategy.py +3 -0
  36. azure/ai/evaluation/{_red_team → red_team}/_red_team.py +96 -67
  37. azure/ai/evaluation/red_team/_red_team_result.py +382 -0
  38. azure/ai/evaluation/{_red_team → red_team}/_utils/constants.py +2 -1
  39. azure/ai/evaluation/{_red_team → red_team}/_utils/formatting_utils.py +23 -22
  40. azure/ai/evaluation/{_red_team → red_team}/_utils/logging_utils.py +1 -1
  41. azure/ai/evaluation/{_red_team → red_team}/_utils/strategy_utils.py +8 -4
  42. azure/ai/evaluation/simulator/_simulator.py +1 -1
  43. {azure_ai_evaluation-1.4.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/METADATA +13 -2
  44. {azure_ai_evaluation-1.4.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/RECORD +50 -40
  45. azure/ai/evaluation/_red_team/_red_team_result.py +0 -246
  46. azure/ai/evaluation/_red_team/_utils/__init__.py +0 -3
  47. azure/ai/evaluation/simulator/_tracing.py +0 -89
  48. /azure/ai/evaluation/{_red_team → red_team}/_callback_chat_target.py +0 -0
  49. /azure/ai/evaluation/{_red_team → red_team}/_default_converter.py +0 -0
  50. /azure/ai/evaluation/{_red_team → red_team/_utils}/__init__.py +0 -0
  51. {azure_ai_evaluation-1.4.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/NOTICE.txt +0 -0
  52. {azure_ai_evaluation-1.4.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/WHEEL +0 -0
  53. {azure_ai_evaluation-1.4.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/top_level.txt +0 -0
azure/ai/evaluation/_evaluate/_telemetry/__init__.py

@@ -9,11 +9,10 @@ import logging
 from typing import Callable, Dict, Literal, Optional, Union, cast

 import pandas as pd
-from promptflow._sdk.entities._flows import FlexFlow as flex_flow
-from promptflow._sdk.entities._flows import Prompty as prompty_sdk
-from promptflow._sdk.entities._flows.dag import Flow as dag_flow
-from promptflow.client import PFClient
-from promptflow.core import Prompty as prompty_core
+from azure.ai.evaluation._legacy._adapters._flows import FlexFlow as flex_flow
+from azure.ai.evaluation._legacy._adapters._flows import AsyncPrompty as prompty_sdk
+from azure.ai.evaluation._legacy._adapters._flows import Flow as dag_flow
+from azure.ai.evaluation._legacy._adapters.client import PFClient
 from typing_extensions import ParamSpec

 from azure.ai.evaluation._model_configurations import AzureAIProject, EvaluationResult
@@ -66,7 +65,7 @@ def _get_evaluator_properties(evaluator, evaluator_name):

     try:
         # Cover flex flow and prompty based evaluator
-        if isinstance(evaluator, (prompty_sdk, prompty_core, flex_flow)):
+        if isinstance(evaluator, (prompty_sdk, flex_flow)):
             name = evaluator.name
             pf_type = evaluator.__class__.__name__
         # Cover dag flow based evaluator
@@ -94,86 +93,3 @@ def _get_evaluator_properties(evaluator, evaluator_name):
         "type": _get_evaluator_type(evaluator),
         "alias": evaluator_name if evaluator_name else "",
     }
-
-
-# cspell:ignore isna
-def log_evaluate_activity(func: Callable[P, EvaluationResult]) -> Callable[P, EvaluationResult]:
-    """Decorator to log evaluate activity
-
-    :param func: The function to be decorated
-    :type func: Callable
-    :returns: The decorated function
-    :rtype: Callable[P, EvaluationResult]
-    """
-
-    @functools.wraps(func)
-    def wrapper(*args: P.args, **kwargs: P.kwargs) -> EvaluationResult:
-        from promptflow._sdk._telemetry import ActivityType, log_activity
-        from promptflow._sdk._telemetry.telemetry import get_telemetry_logger
-
-        evaluators = cast(Optional[Dict[str, Callable]], kwargs.get("evaluators", {})) or {}
-        azure_ai_project = cast(Optional[AzureAIProject], kwargs.get("azure_ai_project", None))
-
-        pf_client = PFClient(
-            config=(
-                {"trace.destination": _trace_destination_from_project_scope(azure_ai_project)}
-                if azure_ai_project
-                else None
-            ),
-            user_agent=USER_AGENT,
-        )
-
-        trace_destination = pf_client._config.get_trace_destination()  # pylint: disable=protected-access
-        track_in_cloud = bool(trace_destination) if trace_destination != "none" else False
-        evaluate_target = bool(kwargs.get("target", None))
-        evaluator_config = bool(kwargs.get("evaluator_config", None))
-        custom_dimensions: Dict[str, Union[str, bool]] = {
-            "track_in_cloud": track_in_cloud,
-            "evaluate_target": evaluate_target,
-            "evaluator_config": evaluator_config,
-        }
-
-        with log_activity(
-            get_telemetry_logger(),
-            "pf.evals.evaluate",
-            activity_type=ActivityType.PUBLICAPI,
-            user_agent=USER_AGENT,
-            custom_dimensions=custom_dimensions,
-        ):
-            result = func(*args, **kwargs)
-
-        try:
-            evaluators_info = []
-            for evaluator_name, evaluator in evaluators.items():
-                evaluator_info = _get_evaluator_properties(evaluator, evaluator_name)
-                try:
-                    evaluator_df = pd.DataFrame(result.get("rows", [])).filter(
-                        like=f"outputs.{evaluator_name}", axis=1
-                    )
-
-                    failed_rows = (
-                        evaluator_df.shape[0] if evaluator_df.empty else int(evaluator_df.isna().any(axis=1).sum())
-                    )
-                    total_rows = evaluator_df.shape[0]
-
-                    evaluator_info["failed_rows"] = failed_rows
-                    evaluator_info["total_rows"] = total_rows
-                except Exception as e:  # pylint: disable=broad-exception-caught
-                    LOGGER.debug("Failed to collect evaluate failed row info for %s: %s", evaluator_name, e)
-                evaluators_info.append(evaluator_info)
-
-            custom_dimensions = {"evaluators_info": json.dumps(evaluators_info)}
-            with log_activity(
-                get_telemetry_logger(),
-                "pf.evals.evaluate_usage_info",
-                activity_type=ActivityType.PUBLICAPI,
-                user_agent=USER_AGENT,
-                custom_dimensions=custom_dimensions,
-            ):
-                pass
-        except Exception as e:  # pylint: disable=broad-exception-caught
-            LOGGER.debug("Failed to collect evaluate usage info: %s", e)
-
-        return result
-
-    return wrapper
azure/ai/evaluation/_evaluate/_utils.py

@@ -12,7 +12,7 @@ import uuid
 import base64

 import pandas as pd
-from promptflow.entities import Run
+from azure.ai.evaluation._legacy._adapters.entities import Run

 from azure.ai.evaluation._constants import (
     DEFAULT_EVALUATION_RESULTS_FILE_NAME,
@@ -46,7 +46,7 @@ def is_none(value) -> bool:
 def extract_workspace_triad_from_trace_provider(  # pylint: disable=name-too-long
     trace_provider: str,
 ) -> AzureMLWorkspace:
-    from promptflow._cli._utils import get_workspace_triad_from_local
+    from azure.ai.evaluation._legacy._adapters.utils import get_workspace_triad_from_local

     match = re.match(AZURE_WORKSPACE_REGEX_FORMAT, trace_provider)
     if not match or len(match.groups()) != 5:
@@ -131,7 +131,7 @@ def _log_metrics_and_instance_results(
     metrics: Dict[str, Any],
     instance_results: pd.DataFrame,
     trace_destination: Optional[str],
-    run: Run,
+    run: Optional[Run],
     evaluation_name: Optional[str],
     **kwargs,
 ) -> Optional[str]:
azure/ai/evaluation/_evaluators/_common/_base_eval.py

@@ -6,7 +6,7 @@ import inspect
 from abc import ABC, abstractmethod
 from typing import Any, Callable, Dict, Generic, List, TypedDict, TypeVar, Union, cast, final, Optional

-from promptflow._utils.async_utils import async_run_allowing_running_loop
+from azure.ai.evaluation._legacy._adapters.utils import async_run_allowing_running_loop
 from typing_extensions import ParamSpec, TypeAlias, get_overloads

 from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py

@@ -4,7 +4,7 @@
 from concurrent.futures import as_completed
 from typing import TypeVar, Dict, List

-from promptflow.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
+from azure.ai.evaluation._legacy._adapters.tracing import ThreadPoolExecutorWithContext as ThreadPoolExecutor
 from typing_extensions import override

 from azure.ai.evaluation._evaluators._common import EvaluatorBase
azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py

@@ -6,7 +6,7 @@ import math
 import re
 from typing import Dict, TypeVar, Union

-from promptflow.core import AsyncPrompty
+from azure.ai.evaluation._legacy.prompty import AsyncPrompty
 from typing_extensions import override

 from azure.ai.evaluation._common.constants import PROMPT_BASED_REASON_EVALUATORS
azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py

@@ -5,7 +5,7 @@ import os
 from typing import Dict, List, Optional, Union

 from typing_extensions import overload, override
-from promptflow.core import AsyncPrompty
+from azure.ai.evaluation._legacy._adapters._flows import AsyncPrompty

 from azure.ai.evaluation._evaluators._common import PromptyEvaluatorBase
 from azure.ai.evaluation._model_configurations import Conversation
azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py

@@ -15,6 +15,7 @@ from azure.ai.evaluation._model_configurations import Conversation, Message
 from azure.ai.evaluation._common._experimental import experimental


+@experimental
 class ResponseCompletenessEvaluator(PromptyEvaluatorBase[Union[str, float]]):
     """
     Evaluates the extent to which a given response contains all necessary and relevant information with respect to the
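
The only change here is marking the evaluator @experimental; construction and invocation are unchanged. A hedged sketch following the common prompty-evaluator pattern (the model_config shape and call parameters are assumptions, not confirmed by this diff; the import path mirrors the module shown above):

    from azure.ai.evaluation._evaluators._response_completeness._response_completeness import (
        ResponseCompletenessEvaluator,
    )

    # Placeholder model configuration; endpoint/key/deployment values are illustrative.
    model_config = {
        "azure_endpoint": "https://<your-resource>.openai.azure.com",
        "api_key": "<your-api-key>",
        "azure_deployment": "<your-deployment>",
    }

    evaluator = ResponseCompletenessEvaluator(model_config=model_config)
    result = evaluator(
        response="Paris is the capital of France.",
        ground_truth="The capital of France is Paris.",
    )
    print(result)  # using the class may now emit an experimental-usage warning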

azure/ai/evaluation/_legacy/_adapters/__init__.py (new file)

@@ -0,0 +1,21 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# NOTE: This contains adapters that make the Promptflow dependency optional. In the first phase,
+# Promptflow will still be installed as part of the azure-ai-evaluation dependencies. This
+# will be removed in the future once the code migration is complete.
+
+from typing import Final
+
+
+_has_legacy = False
+try:
+    from promptflow.client import PFClient
+
+    _has_legacy = True
+except ImportError:
+    pass
+
+HAS_LEGACY_SDK: Final[bool] = _has_legacy
+MISSING_LEGACY_SDK: Final[bool] = not _has_legacy
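
The NOTE above describes the adapter strategy: each promptflow import is attempted and, on ImportError, replaced by a local stand-in, with the outcome exported as module-level flags. A minimal sketch of how a caller might branch on those flags (the import path comes from this diff; the branching function is illustrative):

    # Illustrative sketch: guard promptflow-dependent code paths on the adapter flags.
    from azure.ai.evaluation._legacy._adapters import HAS_LEGACY_SDK, MISSING_LEGACY_SDK

    def describe_runtime() -> str:
        # HAS_LEGACY_SDK is True only when `from promptflow.client import PFClient` succeeded.
        if HAS_LEGACY_SDK:
            return "promptflow installed: adapters re-export the real SDK objects"
        return "promptflow missing: adapters fall back to local stand-ins"

    assert HAS_LEGACY_SDK != MISSING_LEGACY_SDK  # the flags are complementary by construction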

azure/ai/evaluation/_legacy/_adapters/_configuration.py (new file)

@@ -0,0 +1,45 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from pathlib import Path
+from typing import Any, Dict, Final, Optional
+from typing_extensions import TypeAlias
+
+
+try:
+    from promptflow._sdk._configuration import Configuration as _Configuration
+except ImportError:
+    _global_config: Final[Dict[str, Any]] = {}
+
+    class _Configuration:
+        TRACE_DESTINATION: Final[str] = "trace.destination"
+        _instance = None
+
+        def __init__(self, *, override_config: Optional[Dict[str, Any]] = None) -> None:
+            self._config = override_config or {}
+
+        @classmethod
+        def get_instance(cls) -> "_Configuration":
+            """Use this to get instance to avoid multiple copies of same global config."""
+            if cls._instance is None:
+                cls._instance = Configuration(override_config=_global_config)
+            return cls._instance
+
+        def set_config(self, key: str, value: Any) -> None:
+            # Simulated config storage
+            self._config[key] = value
+
+        def get_config(self, key: str) -> Any:
+            # Simulated config storage
+            if key in self._config:
+                return self._config[key]
+            return _global_config.get(key, None)
+
+        def get_trace_destination(self, path: Optional[Path] = None) -> Optional[str]:
+            if path:
+                raise NotImplementedError("Setting trace destination with a path is not supported.")
+            return self._config.get("trace.destination", None)
+
+
+Configuration: TypeAlias = _Configuration
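
When promptflow is absent, the stand-in Configuration is a thin dict wrapper: get_instance() returns a process-wide singleton, and get_trace_destination() supports only the path-less call. A hedged usage sketch against the fallback class as defined above:

    # Sketch of the fallback Configuration (assumes promptflow is NOT installed,
    # so the stand-in class above is the one behind the TypeAlias).
    from azure.ai.evaluation._legacy._adapters._configuration import Configuration

    config = Configuration.get_instance()            # singleton; repeated calls return the same object
    config.set_config("trace.destination", "none")
    assert config.get_config("trace.destination") == "none"
    assert config.get_trace_destination() == "none"  # passing a path would raise NotImplementedError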

azure/ai/evaluation/_legacy/_adapters/_constants.py (new file)

@@ -0,0 +1,10 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Final
+
+
+PF_FLOW_ENTRY_IN_TMP: Final[str] = "PF_FLOW_ENTRY_IN_TMP"
+PF_FLOW_META_LOAD_IN_SUBPROCESS: Final[str] = "PF_FLOW_META_LOAD_IN_SUBPROCESS"
+LINE_NUMBER: Final[str] = "line_number"

azure/ai/evaluation/_legacy/_adapters/_errors.py (new file)

@@ -0,0 +1,29 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Any
+from typing_extensions import TypeAlias
+
+
+try:
+    from promptflow.core._errors import MissingRequiredPackage as _MissingRequiredPackage
+except ImportError:
+    from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
+
+    class _MissingRequiredPackage(EvaluationException):
+        """Raised when a required package is missing.
+
+        :param message: A message describing the error. This is the error message the user will see.
+        :type message: str
+        """
+
+        def __init__(self, message: str, **kwargs: Any):
+            kwargs.setdefault("category", ErrorCategory.MISSING_PACKAGE)
+            kwargs.setdefault("blame", ErrorBlame.SYSTEM_ERROR)
+            kwargs.setdefault("target", ErrorTarget.EVALUATE)
+            kwargs.setdefault("internal_message", "Missing required package.")
+            super().__init__(message=message, **kwargs)
+
+
+MissingRequiredPackage: TypeAlias = _MissingRequiredPackage
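
Whichever branch wins, callers raise and catch the error under a single name; the fallback inherits from EvaluationException and pre-fills category, blame, and target. An illustrative guard (the helper name is hypothetical):

    from azure.ai.evaluation._legacy._adapters._errors import MissingRequiredPackage

    def require_promptflow() -> None:
        # Hypothetical helper: the error type behaves the same whether it came from
        # promptflow.core._errors or from the EvaluationException-based fallback above.
        try:
            import promptflow  # noqa: F401
        except ImportError as exc:
            raise MissingRequiredPackage("Please install the 'promptflow' package.") from exc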

azure/ai/evaluation/_legacy/_adapters/_flows.py (new file)

@@ -0,0 +1,28 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing_extensions import TypeAlias
+
+
+try:
+    from promptflow._sdk.entities._flows import AsyncPrompty as _AsyncPrompty
+    from promptflow._sdk.entities._flows import FlexFlow as _FlexFlow
+    from promptflow._sdk.entities._flows.dag import Flow as _Flow
+except ImportError:
+    from azure.ai.evaluation._legacy.prompty import AsyncPrompty as _AsyncPrompty
+
+    class _FlexFlow:
+        pass
+
+    _FlexFlow.__name__ = "FlexFlow"
+
+    class _Flow:
+        name: str
+
+    _Flow.__name__ = "Flow"
+
+
+AsyncPrompty: TypeAlias = _AsyncPrompty
+FlexFlow: TypeAlias = _FlexFlow
+Flow: TypeAlias = _Flow
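
The empty _FlexFlow/_Flow stand-ins exist so that isinstance checks, such as the one updated in _get_evaluator_properties above, keep working and simply return False when promptflow is missing; reassigning __name__ keeps the reported pf_type stable. A sketch under that assumption:

    # Sketch (assumes promptflow is NOT installed, so the stand-in classes are active).
    from azure.ai.evaluation._legacy._adapters._flows import FlexFlow, Flow

    def my_evaluator(response: str) -> dict:
        return {"length": len(response)}

    assert not isinstance(my_evaluator, (FlexFlow, Flow))  # plain callables are unaffected
    assert FlexFlow.__name__ == "FlexFlow"                 # stable name for telemetry's pf_type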

azure/ai/evaluation/_legacy/_adapters/_service.py (new file)

@@ -0,0 +1,16 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Callable, Final
+
+
+try:
+    from promptflow._cli._pf._service import stop_service as _stop_service
+except ImportError:
+
+    def _stop_service() -> None:
+        pass
+
+
+stop_service: Final[Callable[[], None]] = _stop_service

azure/ai/evaluation/_legacy/_adapters/client.py (new file)

@@ -0,0 +1,51 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from os import PathLike
+from typing import Any, Callable, Dict, Optional, Union
+from typing_extensions import TypeAlias
+
+import pandas as pd
+
+from ._errors import MissingRequiredPackage
+from ._configuration import Configuration
+from .entities import Run
+
+
+try:
+    from promptflow.client import PFClient as _PFClient
+except ImportError:
+
+    class _PFClient:
+        def __init__(self, **kwargs):
+            self._config = Configuration(override_config=kwargs.pop("config", None))
+
+        def run(
+            self,
+            flow: Union[str, PathLike, Callable],
+            *,
+            data: Union[str, PathLike],
+            run: Optional[Union[str, Run]] = None,
+            column_mapping: Optional[dict] = None,
+            variant: Optional[str] = None,
+            connections: Optional[dict] = None,
+            environment_variables: Optional[dict] = None,
+            name: Optional[str] = None,
+            display_name: Optional[str] = None,
+            tags: Optional[Dict[str, str]] = None,
+            resume_from: Optional[Union[str, Run]] = None,
+            code: Optional[Union[str, PathLike]] = None,
+            init: Optional[dict] = None,
+            **kwargs,
+        ) -> Run:
+            raise MissingRequiredPackage("Please install 'promptflow' package to use PFClient")
+
+        def get_details(self, run: Union[str, Run], max_results: int = 100, all_results: bool = False) -> pd.DataFrame:
+            return pd.DataFrame()
+
+        def get_metrics(self, run: Union[str, Run]) -> Dict[str, Any]:
+            return {}
+
+
+PFClient: TypeAlias = _PFClient
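
The fallback PFClient keeps construction and the read-only methods safe no-ops, but fails fast on run so a missing dependency surfaces immediately rather than as silently empty results. A sketch of that behavior (assumes promptflow is not installed; the flow/data inputs are hypothetical):

    from azure.ai.evaluation._legacy._adapters.client import PFClient
    from azure.ai.evaluation._legacy._adapters._errors import MissingRequiredPackage

    client = PFClient(config={"trace.destination": "none"})
    print(client.get_metrics(run="any-run"))   # {} -- harmless default
    try:
        client.run(flow="flow.dag.yaml", data="data.jsonl")
    except MissingRequiredPackage as exc:
        print(f"fails fast: {exc}")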

azure/ai/evaluation/_legacy/_adapters/entities.py (new file)

@@ -0,0 +1,26 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing_extensions import TypeAlias
+
+
+try:
+    from promptflow._sdk.entities import Run as _Run
+except ImportError:
+    from typing_extensions import Protocol
+    from typing import Any, Dict, Optional
+    from datetime import datetime
+    from pathlib import Path
+
+    class _Run(Protocol):
+        name: str
+        status: str
+        _properties: Dict[str, Any]
+        _created_on: datetime
+        _end_time: Optional[datetime]
+        _experiment_name: Optional[str]
+        _output_path: Path
+
+
+Run: TypeAlias = _Run
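
Because the fallback _Run is a typing_extensions.Protocol, any object that exposes the listed attributes satisfies Run structurally for type checkers, with no promptflow import required. Illustrative sketch (the LocalRun class is hypothetical):

    from datetime import datetime
    from pathlib import Path
    from typing import Any, Dict, Optional

    from azure.ai.evaluation._legacy._adapters.entities import Run

    class LocalRun:
        name = "local-eval-run"
        status = "Completed"
        _properties: Dict[str, Any] = {}
        _created_on = datetime.now()
        _end_time: Optional[datetime] = None
        _experiment_name: Optional[str] = None
        _output_path = Path(".")

    run: Run = LocalRun()  # accepted structurally when the Protocol fallback is active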

azure/ai/evaluation/_legacy/_adapters/tracing.py (new file)

@@ -0,0 +1,28 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Callable, Dict, Final, Optional
+from typing_extensions import TypeAlias
+
+
+try:
+    from promptflow.tracing import ThreadPoolExecutorWithContext as _ThreadPoolExecutorWithContext
+    from promptflow.tracing._integrations._openai_injector import (
+        inject_openai_api as _inject,
+        recover_openai_api as _recover,
+    )
+    from promptflow.tracing import _start_trace
+except ImportError:
+    from concurrent.futures import ThreadPoolExecutor as _ThreadPoolExecutorWithContext
+    from azure.ai.evaluation._legacy._batch_engine._openai_injector import (
+        inject_openai_api as _inject,
+        recover_openai_api as _recover,
+    )
+    from azure.ai.evaluation._legacy._batch_engine._trace import start_trace as _start_trace
+
+
+ThreadPoolExecutorWithContext: TypeAlias = _ThreadPoolExecutorWithContext
+inject_openai_api: Final[Callable[[], None]] = _inject
+recover_openai_api: Final[Callable[[], None]] = _recover
+start_trace: Final = _start_trace
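
With promptflow absent, ThreadPoolExecutorWithContext degrades to the standard-library ThreadPoolExecutor (losing only context propagation), so call sites keep the same shape. Sketch:

    # Sketch: the call site is identical whichever executor the alias resolves to.
    from concurrent.futures import as_completed
    from azure.ai.evaluation._legacy._adapters.tracing import ThreadPoolExecutorWithContext

    with ThreadPoolExecutorWithContext(max_workers=4) as pool:
        futures = [pool.submit(pow, 2, n) for n in range(4)]
        print(sorted(f.result() for f in as_completed(futures)))  # [1, 2, 4, 8]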

azure/ai/evaluation/_legacy/_adapters/types.py (new file)

@@ -0,0 +1,15 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Any
+
+
+class AttrDict(dict):
+    """A dictionary that allows attribute access to its keys."""
+
+    def __getattr__(self, key: str) -> Any:
+        return self[key]
+
+    def __setattr__(self, key: str, value: Any) -> None:
+        self[key] = value
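
AttrDict lets keys double as attributes in both directions; note that, as written, a missing attribute raises KeyError rather than AttributeError. Usage sketch:

    from azure.ai.evaluation._legacy._adapters.types import AttrDict

    row = AttrDict({"query": "What is 2 + 2?", "response": "4"})
    assert row.query == row["query"]
    row.score = 1.0                # equivalent to row["score"] = 1.0
    assert row["score"] == 1.0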

azure/ai/evaluation/_legacy/_adapters/utils.py (new file)

@@ -0,0 +1,31 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Final, Optional
+from typing_extensions import TypeAlias
+
+
+try:
+    from promptflow._utils.user_agent_utils import ClientUserAgentUtil as _ClientUserAgentUtil
+    from promptflow._utils.async_utils import async_run_allowing_running_loop as _async_run_allowing_running_loop
+    from promptflow._cli._utils import get_workspace_triad_from_local as _get_workspace_triad_from_local
+except ImportError:
+    from azure.ai.evaluation._legacy._batch_engine._utils_deprecated import (
+        async_run_allowing_running_loop as _async_run_allowing_running_loop,
+    )
+    from azure.ai.evaluation._evaluate._utils import AzureMLWorkspace
+
+    class _ClientUserAgentUtil:
+        @staticmethod
+        def append_user_agent(user_agent: Optional[str]):
+            # TODO ralphe: implement?
+            pass
+
+    def _get_workspace_triad_from_local() -> AzureMLWorkspace:
+        return AzureMLWorkspace("", "", "")
+
+
+ClientUserAgentUtil: TypeAlias = _ClientUserAgentUtil
+async_run_allowing_running_loop: Final = _async_run_allowing_running_loop
+get_workspace_triad_from_local: Final = _get_workspace_triad_from_local
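
async_run_allowing_running_loop takes an async function plus its arguments and runs it to completion even when an event loop is already running (promptflow's behavior; the _utils_deprecated fallback is assumed to match). Sketch:

    import asyncio
    from azure.ai.evaluation._legacy._adapters.utils import async_run_allowing_running_loop

    async def fetch_score(value: float) -> float:
        await asyncio.sleep(0)  # stand-in for real async work
        return value

    print(async_run_allowing_running_loop(fetch_score, 1.0))  # 1.0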

azure/ai/evaluation/_legacy/_batch_engine/_result.py (whitespace/EOF-newline-only change)

@@ -96,4 +96,4 @@ class BatchResult:
         """The results of the batch run."""
         if not self.details:
             return []
-        return [d.result for d in self.details]
+        return [d.result for d in self.details]

azure/ai/evaluation/_legacy/_batch_engine/_status.py (whitespace/EOF-newline-only change)

@@ -22,4 +22,4 @@ class BatchStatus(IntEnum):

     @staticmethod
     def is_failed(status: "BatchStatus") -> bool:
-        return status == BatchStatus.Failed or status == BatchStatus.Canceled
+        return status == BatchStatus.Failed or status == BatchStatus.Canceled

azure/ai/evaluation/_version.py

@@ -3,4 +3,4 @@
 # ---------------------------------------------------------
 # represents upcoming version

-VERSION = "1.4.0"
+VERSION = "1.5.0"

azure/ai/evaluation/red_team/__init__.py (new file)

@@ -0,0 +1,19 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+try:
+    from ._red_team import RedTeam
+    from ._attack_strategy import AttackStrategy
+    from ._attack_objective_generator import RiskCategory
+    from ._red_team_result import RedTeamResult
+except ImportError:
+    print("[INFO] Could not import Pyrit. Please install the dependency with `pip install azure-ai-evaluation[redteam]`.")
+
+
+__all__ = [
+    "RedTeam",
+    "AttackStrategy",
+    "RiskCategory",
+    "RedTeamResult",
+]
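
This makes the previously private _red_team package public as azure.ai.evaluation.red_team, with its Pyrit dependency gated behind an extra. A sketch of the intended install-and-import flow (the extra name comes from the message above):

    # Assumes the extra has been installed: pip install azure-ai-evaluation[redteam]
    from azure.ai.evaluation.red_team import AttackStrategy, RedTeam, RiskCategory

    # Without the extra, the package prints the [INFO] hint at import time and these
    # names stay undefined, so the import above raises ImportError instead.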

azure/ai/evaluation/{_red_team → red_team}/_attack_objective_generator.py

@@ -7,7 +7,10 @@ import os
 from enum import Enum
 from typing import List, Optional, Dict, Any
 from pathlib import Path
+from azure.ai.evaluation._common._experimental import experimental

+
+@experimental
 class RiskCategory(str, Enum):
     """Risk categories for attack objectives."""
     HateUnfairness = "hate_unfairness"

azure/ai/evaluation/{_red_team → red_team}/_attack_strategy.py

@@ -3,7 +3,10 @@
 # ---------------------------------------------------------
 from enum import Enum
 from typing import List
+from azure.ai.evaluation._common._experimental import experimental

+
+@experimental
 class AttackStrategy(Enum):
     """Strategies for attacks."""
     EASY = "easy"
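
Both enums keep their values; @experimental only layers a decorator on top, so existing value-based lookups should continue to work (the decorator is assumed to warn on use, matching its name, without altering enum semantics). A minimal sketch:

    from azure.ai.evaluation.red_team import AttackStrategy, RiskCategory

    assert AttackStrategy.EASY.value == "easy"
    assert RiskCategory.HateUnfairness.value == "hate_unfairness"
    assert AttackStrategy("easy") is AttackStrategy.EASY  # value lookup is unchanged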