azure-ai-evaluation 1.3.0__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of azure-ai-evaluation might be problematic. Click here for more details.

Files changed (142) hide show
  1. azure/ai/evaluation/__init__.py +27 -1
  2. azure/ai/evaluation/_azure/_models.py +6 -6
  3. azure/ai/evaluation/_common/constants.py +6 -2
  4. azure/ai/evaluation/_common/rai_service.py +39 -5
  5. azure/ai/evaluation/_common/raiclient/__init__.py +34 -0
  6. azure/ai/evaluation/_common/raiclient/_client.py +128 -0
  7. azure/ai/evaluation/_common/raiclient/_configuration.py +87 -0
  8. azure/ai/evaluation/_common/raiclient/_model_base.py +1235 -0
  9. azure/ai/evaluation/_common/raiclient/_patch.py +20 -0
  10. azure/ai/evaluation/_common/raiclient/_serialization.py +2050 -0
  11. azure/ai/evaluation/_common/raiclient/_version.py +9 -0
  12. azure/ai/evaluation/_common/raiclient/aio/__init__.py +29 -0
  13. azure/ai/evaluation/_common/raiclient/aio/_client.py +130 -0
  14. azure/ai/evaluation/_common/raiclient/aio/_configuration.py +87 -0
  15. azure/ai/evaluation/_common/raiclient/aio/_patch.py +20 -0
  16. azure/ai/evaluation/_common/raiclient/aio/operations/__init__.py +25 -0
  17. azure/ai/evaluation/_common/raiclient/aio/operations/_operations.py +981 -0
  18. azure/ai/evaluation/_common/raiclient/aio/operations/_patch.py +20 -0
  19. azure/ai/evaluation/_common/raiclient/models/__init__.py +60 -0
  20. azure/ai/evaluation/_common/raiclient/models/_enums.py +18 -0
  21. azure/ai/evaluation/_common/raiclient/models/_models.py +651 -0
  22. azure/ai/evaluation/_common/raiclient/models/_patch.py +20 -0
  23. azure/ai/evaluation/_common/raiclient/operations/__init__.py +25 -0
  24. azure/ai/evaluation/_common/raiclient/operations/_operations.py +1225 -0
  25. azure/ai/evaluation/_common/raiclient/operations/_patch.py +20 -0
  26. azure/ai/evaluation/_common/raiclient/py.typed +1 -0
  27. azure/ai/evaluation/_common/utils.py +23 -3
  28. azure/ai/evaluation/_constants.py +7 -0
  29. azure/ai/evaluation/_converters/__init__.py +3 -0
  30. azure/ai/evaluation/_converters/_ai_services.py +804 -0
  31. azure/ai/evaluation/_converters/_models.py +302 -0
  32. azure/ai/evaluation/_evaluate/_batch_run/__init__.py +10 -3
  33. azure/ai/evaluation/_evaluate/_batch_run/_run_submitter_client.py +104 -0
  34. azure/ai/evaluation/_evaluate/_batch_run/batch_clients.py +82 -0
  35. azure/ai/evaluation/_evaluate/_batch_run/code_client.py +18 -12
  36. azure/ai/evaluation/_evaluate/_batch_run/eval_run_context.py +9 -4
  37. azure/ai/evaluation/_evaluate/_batch_run/proxy_client.py +42 -22
  38. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +1 -1
  39. azure/ai/evaluation/_evaluate/_eval_run.py +2 -2
  40. azure/ai/evaluation/_evaluate/_evaluate.py +109 -64
  41. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +5 -89
  42. azure/ai/evaluation/_evaluate/_utils.py +3 -3
  43. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +23 -3
  44. azure/ai/evaluation/_evaluators/_code_vulnerability/__init__.py +5 -0
  45. azure/ai/evaluation/_evaluators/_code_vulnerability/_code_vulnerability.py +120 -0
  46. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +21 -2
  47. azure/ai/evaluation/_evaluators/_common/_base_eval.py +44 -4
  48. azure/ai/evaluation/_evaluators/_common/_base_multi_eval.py +4 -2
  49. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +44 -5
  50. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +16 -4
  51. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +42 -5
  52. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +15 -0
  53. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +15 -0
  54. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +15 -0
  55. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +15 -0
  56. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +28 -4
  57. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +21 -2
  58. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +26 -3
  59. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +22 -4
  60. azure/ai/evaluation/_evaluators/_intent_resolution/__init__.py +7 -0
  61. azure/ai/evaluation/_evaluators/_intent_resolution/_intent_resolution.py +152 -0
  62. azure/ai/evaluation/_evaluators/_intent_resolution/intent_resolution.prompty +161 -0
  63. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +26 -3
  64. azure/ai/evaluation/_evaluators/_qa/_qa.py +51 -7
  65. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +26 -2
  66. azure/ai/evaluation/_evaluators/_response_completeness/__init__.py +7 -0
  67. azure/ai/evaluation/_evaluators/_response_completeness/_response_completeness.py +158 -0
  68. azure/ai/evaluation/_evaluators/_response_completeness/response_completeness.prompty +99 -0
  69. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +21 -2
  70. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +113 -4
  71. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +23 -3
  72. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +24 -5
  73. azure/ai/evaluation/_evaluators/_task_adherence/__init__.py +7 -0
  74. azure/ai/evaluation/_evaluators/_task_adherence/_task_adherence.py +148 -0
  75. azure/ai/evaluation/_evaluators/_task_adherence/task_adherence.prompty +117 -0
  76. azure/ai/evaluation/_evaluators/_tool_call_accuracy/__init__.py +9 -0
  77. azure/ai/evaluation/_evaluators/_tool_call_accuracy/_tool_call_accuracy.py +292 -0
  78. azure/ai/evaluation/_evaluators/_tool_call_accuracy/tool_call_accuracy.prompty +71 -0
  79. azure/ai/evaluation/_evaluators/_ungrounded_attributes/__init__.py +5 -0
  80. azure/ai/evaluation/_evaluators/_ungrounded_attributes/_ungrounded_attributes.py +103 -0
  81. azure/ai/evaluation/_evaluators/_xpia/xpia.py +2 -0
  82. azure/ai/evaluation/_exceptions.py +5 -0
  83. azure/ai/evaluation/_legacy/__init__.py +3 -0
  84. azure/ai/evaluation/_legacy/_adapters/__init__.py +21 -0
  85. azure/ai/evaluation/_legacy/_adapters/_configuration.py +45 -0
  86. azure/ai/evaluation/_legacy/_adapters/_constants.py +10 -0
  87. azure/ai/evaluation/_legacy/_adapters/_errors.py +29 -0
  88. azure/ai/evaluation/_legacy/_adapters/_flows.py +28 -0
  89. azure/ai/evaluation/_legacy/_adapters/_service.py +16 -0
  90. azure/ai/evaluation/_legacy/_adapters/client.py +51 -0
  91. azure/ai/evaluation/_legacy/_adapters/entities.py +26 -0
  92. azure/ai/evaluation/_legacy/_adapters/tracing.py +28 -0
  93. azure/ai/evaluation/_legacy/_adapters/types.py +15 -0
  94. azure/ai/evaluation/_legacy/_adapters/utils.py +31 -0
  95. azure/ai/evaluation/_legacy/_batch_engine/__init__.py +9 -0
  96. azure/ai/evaluation/_legacy/_batch_engine/_config.py +45 -0
  97. azure/ai/evaluation/_legacy/_batch_engine/_engine.py +368 -0
  98. azure/ai/evaluation/_legacy/_batch_engine/_exceptions.py +88 -0
  99. azure/ai/evaluation/_legacy/_batch_engine/_logging.py +292 -0
  100. azure/ai/evaluation/_legacy/_batch_engine/_openai_injector.py +23 -0
  101. azure/ai/evaluation/_legacy/_batch_engine/_result.py +99 -0
  102. azure/ai/evaluation/_legacy/_batch_engine/_run.py +121 -0
  103. azure/ai/evaluation/_legacy/_batch_engine/_run_storage.py +128 -0
  104. azure/ai/evaluation/_legacy/_batch_engine/_run_submitter.py +217 -0
  105. azure/ai/evaluation/_legacy/_batch_engine/_status.py +25 -0
  106. azure/ai/evaluation/_legacy/_batch_engine/_trace.py +105 -0
  107. azure/ai/evaluation/_legacy/_batch_engine/_utils.py +82 -0
  108. azure/ai/evaluation/_legacy/_batch_engine/_utils_deprecated.py +131 -0
  109. azure/ai/evaluation/_legacy/prompty/__init__.py +36 -0
  110. azure/ai/evaluation/_legacy/prompty/_connection.py +182 -0
  111. azure/ai/evaluation/_legacy/prompty/_exceptions.py +59 -0
  112. azure/ai/evaluation/_legacy/prompty/_prompty.py +313 -0
  113. azure/ai/evaluation/_legacy/prompty/_utils.py +545 -0
  114. azure/ai/evaluation/_legacy/prompty/_yaml_utils.py +99 -0
  115. azure/ai/evaluation/_safety_evaluation/__init__.py +1 -1
  116. azure/ai/evaluation/_safety_evaluation/_generated_rai_client.py +0 -0
  117. azure/ai/evaluation/_safety_evaluation/_safety_evaluation.py +251 -150
  118. azure/ai/evaluation/_version.py +1 -1
  119. azure/ai/evaluation/red_team/__init__.py +19 -0
  120. azure/ai/evaluation/red_team/_attack_objective_generator.py +195 -0
  121. azure/ai/evaluation/red_team/_attack_strategy.py +45 -0
  122. azure/ai/evaluation/red_team/_callback_chat_target.py +74 -0
  123. azure/ai/evaluation/red_team/_default_converter.py +21 -0
  124. azure/ai/evaluation/red_team/_red_team.py +1887 -0
  125. azure/ai/evaluation/red_team/_red_team_result.py +382 -0
  126. azure/ai/evaluation/red_team/_utils/__init__.py +3 -0
  127. azure/ai/evaluation/red_team/_utils/constants.py +65 -0
  128. azure/ai/evaluation/red_team/_utils/formatting_utils.py +165 -0
  129. azure/ai/evaluation/red_team/_utils/logging_utils.py +139 -0
  130. azure/ai/evaluation/red_team/_utils/strategy_utils.py +192 -0
  131. azure/ai/evaluation/simulator/_adversarial_scenario.py +3 -1
  132. azure/ai/evaluation/simulator/_adversarial_simulator.py +54 -27
  133. azure/ai/evaluation/simulator/_model_tools/_generated_rai_client.py +145 -0
  134. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +71 -1
  135. azure/ai/evaluation/simulator/_simulator.py +1 -1
  136. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/METADATA +80 -15
  137. azure_ai_evaluation-1.5.0.dist-info/RECORD +207 -0
  138. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/WHEEL +1 -1
  139. azure/ai/evaluation/simulator/_tracing.py +0 -89
  140. azure_ai_evaluation-1.3.0.dist-info/RECORD +0 -119
  141. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/NOTICE.txt +0 -0
  142. {azure_ai_evaluation-1.3.0.dist-info → azure_ai_evaluation-1.5.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,71 @@
1
+ ---
2
+ name: Tool Call Accuracy
3
+ description: Evaluates Tool Call Accuracy for tool used by agent
4
+ model:
5
+ api: chat
6
+ parameters:
7
+ temperature: 0.0
8
+ max_tokens: 800
9
+ top_p: 1.0
10
+ presence_penalty: 0
11
+ frequency_penalty: 0
12
+ response_format:
13
+ type: text
14
+
15
+ inputs:
16
+ query:
17
+ type: List
18
+ tool_call:
19
+ type: Dict
20
+ tool_definition:
21
+ type: Dict
22
+
23
+ ---
24
+ system:
25
+ # Instruction
26
+ ## Goal
27
+ ### You are an expert in evaluating the accuracy of a tool call, considering relevance and potential usefulness, including syntactic and semantic correctness of a proposed tool call from an intelligent system, based on the provided definition and data. Your goal will involve answering the questions below using the information provided.
28
+ - **Definition**: You are given a definition of the communication trait that is being evaluated to help guide your Score.
29
+ - **Data**: Your input data include CONVERSATION , TOOL CALL and TOOL DEFINITION.
30
+ - **Tasks**: To complete your evaluation you will be asked to evaluate the Data in different ways.
31
+
32
+ user:
33
+ # Definition
34
+ **Tool Call Accuracy** refers to the relevance and potential usefulness of a TOOL CALL in the context of an ongoing CONVERSATION and EXTRACTION of RIGHT PARAMETER VALUES from the CONVERSATION. It assesses how likely the TOOL CALL is to contribute meaningfully to the CONVERSATION and help address the user's needs. Focus on evaluating the potential value of the TOOL CALL within the specific context of the given CONVERSATION, without making assumptions beyond the provided information.
35
+ Consider the following factors in your evaluation:
36
+
37
+ 1. Relevance: How well does the proposed tool call align with the current topic and flow of the conversation?
38
+ 2. Parameter Appropriateness: Do the parameters used in the TOOL CALL match the TOOL DEFINITION and are the parameters relevant to the latest user's query?
39
+ 3. Parameter Value Correctness: Are the parameter values used in the TOOL CALL present in or inferred from the CONVERSATION, and relevant to the latest user's query?
40
+ 4. Potential Value: Is the information this tool call might provide likely to be useful in advancing the conversation or addressing the user expressed or implied needs?
41
+ 5. Context Appropriateness: Does the tool call make sense at this point in the conversation, given what has been discussed so far?
42
+
43
+
44
+ # Ratings
45
+ ## [Tool Call Accuracy: 0] (Irrelevant)
46
+ **Definition:**
47
+ 1. The TOOL CALL is not relevant and will not help resolve the user's need.
48
+ 2. TOOL CALL include parameters values that are not present or inferred from CONVERSATION.
49
+ 3. TOOL CALL has parameters that is not present in TOOL DEFINITION.
50
+
51
+ ## [Tool Call Accuracy: 1] (Relevant)
52
+ **Definition:**
53
+ 1. The TOOL CALL is directly relevant and very likely to help resolve the user's need.
54
+ 2. The TOOL CALL includes parameter values that are present in or inferred from the CONVERSATION.
55
+ 3. The TOOL CALL has parameters that are present in the TOOL DEFINITION.
56
+
57
+ # Data
58
+ CONVERSATION : {{query}}
59
+ TOOL CALL: {{tool_call}}
60
+ TOOL DEFINITION: {{tool_definition}}
61
+
62
+
63
+ # Tasks
64
+ ## Please provide your assessment Score for the previous CONVERSATION , TOOL CALL and TOOL DEFINITION based on the Definitions above. Your output should include the following information:
65
+ - **ThoughtChain**: To improve the reasoning process, think step by step and include a step-by-step explanation of your thought process as you analyze the data based on the definitions. Keep it brief and start your ThoughtChain with "Let's think step by step:".
66
+ - **Explanation**: a very short explanation of why you think the input Data should get that Score.
67
+ - **Score**: based on your previous analysis, provide your Score. The Score you give MUST be an integer score (i.e., "0", "1") based on the levels of the definitions.
68
+
69
+
70
+ ## Please provide your answers between the tags: <S0>your chain of thoughts</S0>, <S1>your explanation</S1>, <S2>your Score</S2>.
71
+ # Output
@@ -0,0 +1,5 @@
1
+ from ._ungrounded_attributes import UngroundedAttributesEvaluator
2
+
3
+ __all__ = [
4
+ "UngroundedAttributesEvaluator",
5
+ ]
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing_extensions import overload, override
from typing import Dict, Union

from azure.ai.evaluation._common._experimental import experimental
from azure.ai.evaluation._common.constants import EvaluationMetrics
from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase


@experimental
class UngroundedAttributesEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
    """
    Evaluates ungrounded inference of human attributes for a given query, response, and context for a
    single-turn evaluation only, where query represents the user query and response represents the AI
    system response given the provided context.

    Ungrounded Attributes checks for whether a response is first, ungrounded, and checks if it contains
    information about protected class or emotional state of a person.

    It identifies the following attributes:

    - emotional_state
    - protected_class
    - groundedness

    :param credential: The credential for connecting to Azure AI project. Required
    :type credential: ~azure.core.credentials.TokenCredential
    :param azure_ai_project: The scope of the Azure AI project.
        It contains subscription id, resource group, and project name.
    :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
    :param kwargs: Additional arguments to pass to the evaluator.
    :type kwargs: Any

    .. admonition:: Example:

        .. literalinclude:: ../samples/evaluation_samples_evaluate.py
            :start-after: [START ungrounded_attributes_evaluator]
            :end-before: [END ungrounded_attributes_evaluator]
            :language: python
            :dedent: 8
            :caption: Initialize and call a UngroundedAttributesEvaluator with a query, response and context.

    .. note::

        If this evaluator is supplied to the `evaluate` function, the metric
        for the ungrounded attributes will be "ungrounded_attributes_label".
    """

    id = "ungrounded_attributes"
    """Evaluator identifier, experimental and to be used only with evaluation in cloud."""

    @override
    def __init__(
        self,
        credential,
        azure_ai_project,
    ):
        super().__init__(
            eval_metric=EvaluationMetrics.UNGROUNDED_ATTRIBUTES,
            azure_ai_project=azure_ai_project,
            credential=credential,
        )

    @overload
    def __call__(
        self,
        *,
        query: str,
        response: str,
        context: str,
    ) -> Dict[str, Union[str, bool]]:
        # NOTE: return annotation fixed to Union[str, bool] to match the class
        # generic parameter and the documented ":rtype:" (it previously said
        # Union[str, float], which contradicted both).
        """Evaluate a given query/response pair and context for ungrounded attributes

        :keyword query: The query to be evaluated.
        :paramtype query: str
        :keyword response: The response to be evaluated.
        :paramtype response: str
        :keyword context: The context to be used for evaluation.
        :paramtype context: str
        :return: The ungrounded attributes label.
        :rtype: Dict[str, Union[str, bool]]
        """

    @override
    def __call__(  # pylint: disable=docstring-missing-param
        self,
        *args,
        **kwargs,
    ):
        """Evaluate a given query/response pair and context for ungrounded attributes

        :keyword query: The query to be evaluated.
        :paramtype query: str
        :keyword response: The response to be evaluated.
        :paramtype response: str
        :keyword context: The context to be used for evaluation.
        :paramtype context: str
        :return: The ungrounded attributes label.
        :rtype: Dict[str, Union[str, bool]]
        """
        # Delegate all evaluation work to the RAI-service base class.
        return super().__call__(*args, **kwargs)
@@ -43,6 +43,8 @@ class IndirectAttackEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
43
43
  :param azure_ai_project: The scope of the Azure AI project. It contains subscription id, resource group, and project
44
44
  name.
45
45
  :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
46
+ :param threshold: The threshold for the IndirectAttack evaluator. Default is 0.
47
+ :type threshold: int
46
48
 
47
49
  .. admonition:: Example:
48
50
 
@@ -62,15 +62,18 @@ class ErrorTarget(Enum):
62
62
  CODE_CLIENT = "CodeClient"
63
63
  RAI_CLIENT = "RAIClient"
64
64
  COHERENCE_EVALUATOR = "CoherenceEvaluator"
65
+ COMPLETENESS_EVALUATOR = "CompletenessEvaluator"
65
66
  CONTENT_SAFETY_CHAT_EVALUATOR = "ContentSafetyEvaluator"
66
67
  ECI_EVALUATOR = "ECIEvaluator"
67
68
  F1_EVALUATOR = "F1Evaluator"
68
69
  GROUNDEDNESS_EVALUATOR = "GroundednessEvaluator"
69
70
  PROTECTED_MATERIAL_EVALUATOR = "ProtectedMaterialEvaluator"
71
+ INTENT_RESOLUTION_EVALUATOR = "IntentResolutionEvaluator"
70
72
  RELEVANCE_EVALUATOR = "RelevanceEvaluator"
71
73
  SIMILARITY_EVALUATOR = "SimilarityEvaluator"
72
74
  FLUENCY_EVALUATOR = "FluencyEvaluator"
73
75
  RETRIEVAL_EVALUATOR = "RetrievalEvaluator"
76
+ TASK_ADHERENCE_EVALUATOR = "TaskAdherenceEvaluator"
74
77
  INDIRECT_ATTACK_EVALUATOR = "IndirectAttackEvaluator"
75
78
  INDIRECT_ATTACK_SIMULATOR = "IndirectAttackSimulator"
76
79
  ADVERSARIAL_SIMULATOR = "AdversarialSimulator"
@@ -80,6 +83,8 @@ class ErrorTarget(Enum):
80
83
  MODELS = "Models"
81
84
  UNKNOWN = "Unknown"
82
85
  CONVERSATION = "Conversation"
86
+ TOOL_CALL_ACCURACY_EVALUATOR = "ToolCallAccuracyEvaluator"
87
+ RED_TEAM = "RedTeam"
83
88
 
84
89
 
85
90
  class EvaluationException(AzureError):
@@ -0,0 +1,3 @@
1
+ # ---------------------------------------------------------
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+ # ---------------------------------------------------------
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

# NOTE: This contains adapters that make the Promptflow dependency optional. In the first phase,
# Promptflow will still be installed as part of the azure-ai-evaluation dependencies. This
# will be removed in the future once the code migration is complete.

from typing import Final


try:
    # Probing import: success means the legacy promptflow SDK is installed.
    from promptflow.client import PFClient  # noqa: F401

    _has_legacy = True
except ImportError:
    _has_legacy = False

HAS_LEGACY_SDK: Final[bool] = _has_legacy
MISSING_LEGACY_SDK: Final[bool] = not _has_legacy
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from pathlib import Path
from typing import Any, Dict, Final, Optional
from typing_extensions import TypeAlias


try:
    from promptflow._sdk._configuration import Configuration as _Configuration
except ImportError:
    # Shared backing store for configuration values when promptflow is absent.
    _global_config: Final[Dict[str, Any]] = {}

    class _Configuration:
        """Minimal stand-in for promptflow's Configuration, used when the
        promptflow package is not installed."""

        TRACE_DESTINATION: Final[str] = "trace.destination"
        _instance = None  # singleton created lazily by get_instance()

        def __init__(self, *, override_config: Optional[Dict[str, Any]] = None) -> None:
            self._config = override_config or {}

        @classmethod
        def get_instance(cls) -> "_Configuration":
            """Use this to get instance to avoid multiple copies of same global config."""
            if cls._instance is None:
                # Use cls(...) instead of the module-level "Configuration" alias:
                # the alias is only bound after this class body, so referencing it
                # here was a fragile forward reference (and wrong for subclasses).
                cls._instance = cls(override_config=_global_config)
            return cls._instance

        def set_config(self, key: str, value: Any) -> None:
            # Simulated config storage
            self._config[key] = value

        def get_config(self, key: str) -> Any:
            # Simulated config storage; fall back to the global config.
            if key in self._config:
                return self._config[key]
            return _global_config.get(key, None)

        def get_trace_destination(self, path: Optional[Path] = None) -> Optional[str]:
            """Return the configured trace destination, or None if unset."""
            if path:
                raise NotImplementedError("Setting trace destination with a path is not supported.")
            # Use the class constant rather than repeating the magic string.
            return self._config.get(self.TRACE_DESTINATION, None)


Configuration: TypeAlias = _Configuration
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing import Final


# Names mirrored from the promptflow SDK so callers work without it installed.
PF_FLOW_ENTRY_IN_TMP: Final[str] = "PF_FLOW_ENTRY_IN_TMP"
PF_FLOW_META_LOAD_IN_SUBPROCESS: Final[str] = "PF_FLOW_META_LOAD_IN_SUBPROCESS"

# Column key used to track the originating input row in batch results.
LINE_NUMBER: Final[str] = "line_number"
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing import Any
from typing_extensions import TypeAlias


try:
    from promptflow.core._errors import MissingRequiredPackage as _MissingRequiredPackage
except ImportError:
    from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException

    class _MissingRequiredPackage(EvaluationException):
        """Raised when a required package is missing.

        :param message: A message describing the error. This is the error message the user will see.
        :type message: str
        """

        def __init__(self, message: str, **kwargs: Any):
            # Supply defaults without overriding anything the caller passed in.
            defaults = {
                "category": ErrorCategory.MISSING_PACKAGE,
                "blame": ErrorBlame.SYSTEM_ERROR,
                "target": ErrorTarget.EVALUATE,
                "internal_message": "Missing required package.",
            }
            for key, value in defaults.items():
                kwargs.setdefault(key, value)
            super().__init__(message=message, **kwargs)


MissingRequiredPackage: TypeAlias = _MissingRequiredPackage
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing_extensions import TypeAlias


try:
    from promptflow._sdk.entities._flows import AsyncPrompty as _AsyncPrompty
    from promptflow._sdk.entities._flows import FlexFlow as _FlexFlow
    from promptflow._sdk.entities._flows.dag import Flow as _Flow
except ImportError:
    from azure.ai.evaluation._legacy.prompty import AsyncPrompty as _AsyncPrompty

    class _FlexFlow:
        pass

    class _Flow:
        name: str

    # Present the stand-ins under their public names so reprs/logs match the
    # real promptflow types.
    _FlexFlow.__name__ = "FlexFlow"
    _Flow.__name__ = "Flow"


AsyncPrompty: TypeAlias = _AsyncPrompty
FlexFlow: TypeAlias = _FlexFlow
Flow: TypeAlias = _Flow
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing import Callable, Final


try:
    from promptflow._cli._pf._service import stop_service as _stop_service
except ImportError:

    def _stop_service() -> None:
        """No-op fallback when the promptflow CLI is not installed."""


stop_service: Final[Callable[[], None]] = _stop_service
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from os import PathLike
from typing import Any, Callable, Dict, Optional, Union
from typing_extensions import TypeAlias

import pandas as pd

from ._errors import MissingRequiredPackage
from ._configuration import Configuration
from .entities import Run


try:
    from promptflow.client import PFClient as _PFClient
except ImportError:

    class _PFClient:
        """Minimal stand-in for promptflow's PFClient, used when promptflow is
        not installed. Only the surface this package touches is provided;
        ``run`` always raises because batch submission needs the real client.
        """

        def __init__(self, **kwargs):
            # Mirror the real client's handling of the optional "config" kwarg.
            self._config = Configuration(override_config=kwargs.pop("config", None))

        def run(
            self,
            flow: Union[str, PathLike, Callable],
            *,
            data: Union[str, PathLike],
            run: Optional[Union[str, Run]] = None,
            column_mapping: Optional[dict] = None,
            variant: Optional[str] = None,
            connections: Optional[dict] = None,
            environment_variables: Optional[dict] = None,
            name: Optional[str] = None,
            display_name: Optional[str] = None,
            tags: Optional[Dict[str, str]] = None,
            resume_from: Optional[Union[str, Run]] = None,
            code: Optional[Union[str, PathLike]] = None,
            init: Optional[dict] = None,
            **kwargs,
        ) -> Run:
            # Submitting runs is impossible without the real SDK.
            raise MissingRequiredPackage("Please install 'promptflow' package to use PFClient")

        def get_details(self, run: Union[str, Run], max_results: int = 100, all_results: bool = False) -> pd.DataFrame:
            # No promptflow, no stored run details.
            return pd.DataFrame()

        def get_metrics(self, run: Union[str, Run]) -> Dict[str, Any]:
            # No promptflow, no stored metrics.
            return {}


PFClient: TypeAlias = _PFClient
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing_extensions import TypeAlias


try:
    from promptflow._sdk.entities import Run as _Run
except ImportError:
    from datetime import datetime
    from pathlib import Path
    from typing import Any, Dict, Optional
    from typing_extensions import Protocol

    class _Run(Protocol):
        """Structural type listing the Run attributes this package reads."""

        name: str
        status: str
        _properties: Dict[str, Any]
        _created_on: datetime
        _end_time: Optional[datetime]
        _experiment_name: Optional[str]
        _output_path: Path


Run: TypeAlias = _Run
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing import Callable, Dict, Final, Optional
from typing_extensions import TypeAlias


try:
    from promptflow.tracing import ThreadPoolExecutorWithContext as _ThreadPoolExecutorWithContext
    from promptflow.tracing._integrations._openai_injector import (
        inject_openai_api as _inject,
        recover_openai_api as _recover,
    )
    from promptflow.tracing import _start_trace
except ImportError:
    # Fall back to the stdlib executor and this package's ports of the
    # promptflow tracing helpers.
    from concurrent.futures import ThreadPoolExecutor as _ThreadPoolExecutorWithContext
    from azure.ai.evaluation._legacy._batch_engine._openai_injector import (
        inject_openai_api as _inject,
        recover_openai_api as _recover,
    )
    from azure.ai.evaluation._legacy._batch_engine._trace import start_trace as _start_trace


ThreadPoolExecutorWithContext: TypeAlias = _ThreadPoolExecutorWithContext
inject_openai_api: Final[Callable[[], None]] = _inject
recover_openai_api: Final[Callable[[], None]] = _recover
start_trace: Final = _start_trace
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing import Any


class AttrDict(dict):
    """A dictionary that allows attribute access to its keys."""

    def __getattr__(self, key: str) -> Any:
        # __getattr__ is only invoked when normal attribute lookup fails.
        # Translate KeyError into AttributeError so that hasattr(),
        # getattr(..., default) and the copy/pickle protocols (which probe
        # for optional dunder attributes) behave correctly for missing keys.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(
                f"{type(self).__name__!r} object has no attribute {key!r}"
            ) from None

    def __setattr__(self, key: str, value: Any) -> None:
        # Attribute assignment writes through to the underlying dict.
        self[key] = value
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from typing import Final, Optional
from typing_extensions import TypeAlias


try:
    from promptflow._utils.user_agent_utils import ClientUserAgentUtil as _ClientUserAgentUtil
    from promptflow._utils.async_utils import async_run_allowing_running_loop as _async_run_allowing_running_loop
    from promptflow._cli._utils import get_workspace_triad_from_local as _get_workspace_triad_from_local
except ImportError:
    from azure.ai.evaluation._legacy._batch_engine._utils_deprecated import (
        async_run_allowing_running_loop as _async_run_allowing_running_loop,
    )
    from azure.ai.evaluation._evaluate._utils import AzureMLWorkspace

    class _ClientUserAgentUtil:
        @staticmethod
        def append_user_agent(user_agent: Optional[str]):
            # TODO ralphe: implement?
            pass

    def _get_workspace_triad_from_local() -> AzureMLWorkspace:
        # No local promptflow config to read; return an empty triad.
        return AzureMLWorkspace("", "", "")


ClientUserAgentUtil: TypeAlias = _ClientUserAgentUtil
async_run_allowing_running_loop: Final = _async_run_allowing_running_loop
get_workspace_triad_from_local: Final = _get_workspace_triad_from_local
@@ -0,0 +1,9 @@
1
+ # ---------------------------------------------------------
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+ # ---------------------------------------------------------
4
+
5
+ # NOTE: This is a direct port of the bare minimum needed for BatchEngine functionality from
6
+ # the original Promptflow code. The goal here is expediency, not elegance. As such
7
+ # parts of this code may be a little "quirky", seem incomplete in places, or contain
8
+ # longer TODOs comments than usual. In a future code update, large swaths of this code
9
+ # will be refactored or deleted outright.
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from dataclasses import dataclass
from logging import Logger

from ..._constants import PF_BATCH_TIMEOUT_SEC_DEFAULT


@dataclass
class BatchEngineConfig:
    """Context for a batch of evaluations. This will contain the configuration,
    logging, and other needed information."""

    logger: Logger
    """The logger to use for logging messages."""

    batch_timeout_seconds: int = PF_BATCH_TIMEOUT_SEC_DEFAULT
    """The maximum amount of time to wait for all evaluations in the batch to complete."""

    run_timeout_seconds: int = 600
    """The maximum amount of time to wait for an evaluation to run against a single entry
    in the data input to complete."""

    max_concurrency: int = 10
    """The maximum number of evaluations to run concurrently."""

    use_async: bool = True
    """Whether to use asynchronous evaluation."""

    default_num_results: int = 100
    """The default number of results to return if you don't ask for all results."""

    def __post_init__(self):
        # Validate eagerly so a misconfigured engine fails at construction time.
        if self.logger is None:
            raise ValueError("logger cannot be None")
        for field_name in (
            "batch_timeout_seconds",
            "run_timeout_seconds",
            "max_concurrency",
            "default_num_results",
        ):
            if getattr(self, field_name) <= 0:
                raise ValueError(f"{field_name} must be greater than 0")