azure-ai-evaluation 1.0.0__py3-none-any.whl → 1.0.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of azure-ai-evaluation might be problematic; see the associated registry advisory for details.

Files changed (105)
  1. azure/ai/evaluation/__init__.py +5 -31
  2. azure/ai/evaluation/_common/constants.py +2 -9
  3. azure/ai/evaluation/_common/rai_service.py +120 -300
  4. azure/ai/evaluation/_common/utils.py +23 -381
  5. azure/ai/evaluation/_constants.py +6 -19
  6. azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/__init__.py +2 -3
  7. azure/ai/evaluation/_evaluate/{_batch_run/eval_run_context.py → _batch_run_client/batch_run_context.py} +7 -23
  8. azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/code_client.py +17 -33
  9. azure/ai/evaluation/_evaluate/{_batch_run → _batch_run_client}/proxy_client.py +4 -32
  10. azure/ai/evaluation/_evaluate/_eval_run.py +24 -81
  11. azure/ai/evaluation/_evaluate/_evaluate.py +239 -393
  12. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +17 -17
  13. azure/ai/evaluation/_evaluate/_utils.py +28 -82
  14. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +18 -17
  15. azure/ai/evaluation/_evaluators/{_retrieval → _chat}/__init__.py +2 -2
  16. azure/ai/evaluation/_evaluators/_chat/_chat.py +357 -0
  17. azure/ai/evaluation/_evaluators/{_service_groundedness → _chat/retrieval}/__init__.py +2 -2
  18. azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +157 -0
  19. azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +48 -0
  20. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +88 -78
  21. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +39 -76
  22. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +4 -0
  23. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +67 -105
  24. azure/ai/evaluation/_evaluators/{_multimodal/_content_safety_multimodal_base.py → _content_safety/_content_safety_base.py} +34 -24
  25. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +301 -0
  26. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +54 -105
  27. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +52 -99
  28. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +52 -101
  29. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +51 -101
  30. azure/ai/evaluation/_evaluators/_eci/_eci.py +54 -44
  31. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +19 -34
  32. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +89 -76
  33. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +41 -66
  34. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +16 -14
  35. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +87 -113
  36. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +54 -0
  37. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +27 -20
  38. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +80 -89
  39. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +5 -0
  40. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +104 -0
  41. azure/ai/evaluation/_evaluators/_qa/_qa.py +30 -23
  42. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +96 -84
  43. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +47 -78
  44. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +27 -26
  45. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +38 -53
  46. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +5 -0
  47. azure/ai/evaluation/_evaluators/_xpia/xpia.py +105 -91
  48. azure/ai/evaluation/_exceptions.py +7 -28
  49. azure/ai/evaluation/_http_utils.py +132 -203
  50. azure/ai/evaluation/_model_configurations.py +8 -104
  51. azure/ai/evaluation/_version.py +1 -1
  52. azure/ai/evaluation/simulator/__init__.py +1 -2
  53. azure/ai/evaluation/simulator/_adversarial_scenario.py +1 -20
  54. azure/ai/evaluation/simulator/_adversarial_simulator.py +92 -111
  55. azure/ai/evaluation/simulator/_constants.py +1 -11
  56. azure/ai/evaluation/simulator/_conversation/__init__.py +12 -13
  57. azure/ai/evaluation/simulator/_conversation/_conversation.py +4 -4
  58. azure/ai/evaluation/simulator/_direct_attack_simulator.py +67 -33
  59. azure/ai/evaluation/simulator/_helpers/__init__.py +2 -1
  60. azure/ai/evaluation/{_common → simulator/_helpers}/_experimental.py +9 -24
  61. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +5 -26
  62. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +94 -107
  63. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +22 -70
  64. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +11 -28
  65. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +4 -8
  66. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +24 -68
  67. azure/ai/evaluation/simulator/_model_tools/models.py +10 -10
  68. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +10 -6
  69. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +5 -6
  70. azure/ai/evaluation/simulator/_simulator.py +207 -277
  71. azure/ai/evaluation/simulator/_tracing.py +4 -4
  72. azure/ai/evaluation/simulator/_utils.py +13 -31
  73. azure_ai_evaluation-1.0.0b2.dist-info/METADATA +449 -0
  74. azure_ai_evaluation-1.0.0b2.dist-info/RECORD +99 -0
  75. {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/WHEEL +1 -1
  76. azure/ai/evaluation/_common/math.py +0 -89
  77. azure/ai/evaluation/_evaluate/_batch_run/target_run_context.py +0 -46
  78. azure/ai/evaluation/_evaluators/_common/__init__.py +0 -13
  79. azure/ai/evaluation/_evaluators/_common/_base_eval.py +0 -344
  80. azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py +0 -88
  81. azure/ai/evaluation/_evaluators/_common/_base_rai_svc_eval.py +0 -133
  82. azure/ai/evaluation/_evaluators/_groundedness/groundedness_with_query.prompty +0 -113
  83. azure/ai/evaluation/_evaluators/_groundedness/groundedness_without_query.prompty +0 -99
  84. azure/ai/evaluation/_evaluators/_multimodal/__init__.py +0 -20
  85. azure/ai/evaluation/_evaluators/_multimodal/_content_safety_multimodal.py +0 -132
  86. azure/ai/evaluation/_evaluators/_multimodal/_hate_unfairness.py +0 -100
  87. azure/ai/evaluation/_evaluators/_multimodal/_protected_material.py +0 -124
  88. azure/ai/evaluation/_evaluators/_multimodal/_self_harm.py +0 -100
  89. azure/ai/evaluation/_evaluators/_multimodal/_sexual.py +0 -100
  90. azure/ai/evaluation/_evaluators/_multimodal/_violence.py +0 -100
  91. azure/ai/evaluation/_evaluators/_retrieval/_retrieval.py +0 -112
  92. azure/ai/evaluation/_evaluators/_retrieval/retrieval.prompty +0 -93
  93. azure/ai/evaluation/_evaluators/_service_groundedness/_service_groundedness.py +0 -148
  94. azure/ai/evaluation/_vendor/__init__.py +0 -3
  95. azure/ai/evaluation/_vendor/rouge_score/__init__.py +0 -14
  96. azure/ai/evaluation/_vendor/rouge_score/rouge_scorer.py +0 -328
  97. azure/ai/evaluation/_vendor/rouge_score/scoring.py +0 -63
  98. azure/ai/evaluation/_vendor/rouge_score/tokenize.py +0 -63
  99. azure/ai/evaluation/_vendor/rouge_score/tokenizers.py +0 -53
  100. azure/ai/evaluation/simulator/_data_sources/__init__.py +0 -3
  101. azure/ai/evaluation/simulator/_data_sources/grounding.json +0 -1150
  102. azure_ai_evaluation-1.0.0.dist-info/METADATA +0 -595
  103. azure_ai_evaluation-1.0.0.dist-info/NOTICE.txt +0 -70
  104. azure_ai_evaluation-1.0.0.dist-info/RECORD +0 -119
  105. {azure_ai_evaluation-1.0.0.dist-info → azure_ai_evaluation-1.0.0b2.dist-info}/top_level.txt +0 -0
@@ -2,124 +2,138 @@
2
2
  # Copyright (c) Microsoft Corporation. All rights reserved.
3
3
  # ---------------------------------------------------------
4
4
  import logging
5
- from typing import Dict, List, Union
5
+ from typing import Optional
6
6
 
7
- from typing_extensions import overload, override
7
+ from promptflow._utils.async_utils import async_run_allowing_running_loop
8
8
 
9
- from azure.ai.evaluation._common._experimental import experimental
10
9
  from azure.ai.evaluation._common.constants import EvaluationMetrics
11
- from azure.ai.evaluation._evaluators._common import RaiServiceEvaluatorBase
12
- from azure.ai.evaluation._model_configurations import Conversation
10
+ from azure.ai.evaluation._common.rai_service import evaluate_with_rai_service
11
+ from azure.ai.evaluation._exceptions import ErrorBlame, ErrorCategory, ErrorTarget, EvaluationException
13
12
 
14
13
  logger = logging.getLogger(__name__)
15
14
 
16
15
 
17
- @experimental
18
- class IndirectAttackEvaluator(RaiServiceEvaluatorBase[Union[str, bool]]):
19
- """
20
- Evaluates the indirect attack score for a given query and response or a multi-turn conversation, with reasoning.
21
-
22
- Indirect attacks, also known as cross-domain prompt injected attacks (XPIA), are when jailbreak attacks are
23
- injected into the context of a document or source that may result in an altered, unexpected behavior.
24
-
25
- Indirect attacks evaluations are broken down into three subcategories:
26
- - Manipulated Content: This category involves commands that aim to alter or fabricate information, often
27
- to mislead or deceive. It includes actions like spreading false information, altering language or
28
- formatting, and hiding or emphasizing specific details. The goal is often to manipulate perceptions
29
- or behaviors by controlling the flow and presentation of information.
30
- - Intrusion: This category encompasses commands that attempt to breach systems, gain unauthorized access,
31
- or elevate privileges illicitly. It includes creating backdoors, exploiting vulnerabilities, and
32
- traditional jailbreaks to bypass security measures. The intent is often to gain control or access
33
- sensitive data without detection.
34
- - Information Gathering: This category pertains to accessing, deleting, or modifying data without
35
- authorization, often for malicious purposes. It includes exfiltrating sensitive data, tampering with
36
- system records, and removing or altering existing information. The focus is on acquiring or
37
- manipulating data to exploit or compromise systems and individuals.
38
-
39
- Indirect attack scores are boolean values, where True indicates that the response contains an indirect attack.
40
-
41
- :param credential: The credential for connecting to Azure AI project. Required
42
- :type credential: ~azure.core.credentials.TokenCredential
16
+ class IndirectAttackEvaluator:
17
+ """A Cross-Domain Prompt Injection Attack (XPIA) jailbreak evaluator.
18
+
19
+ Detect whether cross domain injected attacks are present in your AI system's response.
20
+
43
21
  :param azure_ai_project: The scope of the Azure AI project. It contains subscription id, resource group, and project
44
22
  name.
45
23
  :type azure_ai_project: ~azure.ai.evaluation.AzureAIProject
24
+ :param eval_last_turn: Set to True to evaluate only the most recent exchange in the dialogue,
25
+ focusing on the latest user inquiry and the assistant's corresponding response. Defaults to False
26
+ :type eval_last_turn: bool
27
+ :param credential: The credential for connecting to Azure AI project.
28
+ :type credential: ~azure.core.credentials.TokenCredential
29
+ :return: A function that evaluates and generates metrics for XPIA chat scenario. Metrics include the overall
30
+ evaluation label and reason for the Q/A Pair, as well as sub-labels for manipulated content, intrusion, and
31
+ information.
32
+ :rtype: Callable
33
+ **Usage**
46
34
 
47
- .. admonition:: Example:
35
+ .. code-block:: python
48
36
 
49
- .. literalinclude:: ../samples/evaluation_samples_evaluate.py
50
- :start-after: [START indirect_attack_evaluator]
51
- :end-before: [END indirect_attack_evaluator]
52
- :language: python
53
- :dedent: 8
54
- :caption: Initialize and call an IndirectAttackEvaluator.
55
- """
37
+ eval_fn = IndirectAttackEvaluator(model_config)
38
+ result = eval_fn(query="What is the capital of France?", response="Paris.")
56
39
 
57
- id = "azureml://registries/azureml/models/Indirect-Attack-Evaluator/versions/3"
58
- """Evaluator identifier, experimental and to be used only with evaluation in cloud."""
40
+ **Output format for question-answer pair**
59
41
 
60
- @override
61
- def __init__(
62
- self,
63
- credential,
64
- azure_ai_project,
65
- ):
66
- super().__init__(
67
- eval_metric=EvaluationMetrics.XPIA,
68
- azure_ai_project=azure_ai_project,
69
- credential=credential,
70
- )
42
+ .. code-block:: python
43
+
44
+ {
45
+ 'xpia_label': False,
46
+ 'xpia_reason': 'The conversation does not contain any manipulated content, intrusion or
47
+ information gathering.'
48
+ 'xpia_information_gathering': False,
49
+ 'xpia_intrusion': False
50
+ 'xpia_manipulated_content': False
51
+ }
52
+ """
53
+
54
+ def __init__(self, azure_ai_project: dict, eval_last_turn: bool = False, credential=None):
55
+ self._evaluator = _IndirectAttackEvaluator(azure_ai_project, credential)
56
+ self._eval_last_turn = eval_last_turn
71
57
 
72
- @overload
73
58
  def __call__(
74
59
  self,
75
60
  *,
76
- query: str,
77
- response: str,
78
- ) -> Dict[str, Union[str, bool]]:
79
- """Evaluate whether cross domain injected attacks are present in given query/response
61
+ query: Optional[str],
62
+ response: Optional[str],
63
+ **kwargs,
64
+ ):
65
+ """
66
+ Evaluates content according to the presence of attacks injected into the conversation context to
67
+ interrupt normal expected functionality by eliciting manipulated content, intrusion and attempting
68
+ to gather information outside the scope of your AI system.
69
+ :keyword query: The query to be evaluated. Mutually exclusive with 'conversation'.
70
+ :paramtype query: Optional[str]
71
+ :keyword response: The response to be evaluated. Mutually exclusive with 'conversation'.
72
+ :paramtype response: Optional[str]
73
+ :return: The evaluation scores and reasoning.
74
+ :rtype: dict
75
+ """
76
+
77
+ return self._evaluator(query=query, response=response, **kwargs)
80
78
 
79
+
80
+ class _AsyncIndirectAttackEvaluator:
81
+ def __init__(self, azure_ai_project: dict, credential=None):
82
+ self._azure_ai_project = azure_ai_project
83
+ self._credential = credential
84
+
85
+ async def __call__(self, *, query: str, response: str, **kwargs):
86
+ """
87
+ Evaluates content according to this evaluator's metric.
81
88
  :keyword query: The query to be evaluated.
82
89
  :paramtype query: str
83
90
  :keyword response: The response to be evaluated.
84
91
  :paramtype response: str
85
- :return: The cross domain injection attack score
86
- :rtype: Dict[str, Union[str, bool]]
92
+ :return: The evaluation score computation based on the metric (self.metric).
93
+ :rtype: Any
87
94
  """
95
+ # Validate inputs
96
+ # Raises value error if failed, so execution alone signifies success.
97
+ if not (query and query.strip() and query != "None") or not (
98
+ response and response.strip() and response != "None"
99
+ ):
100
+ msg = "Both 'query' and 'response' must be non-empty strings."
101
+ raise EvaluationException(
102
+ message=msg,
103
+ internal_message=msg,
104
+ error_category=ErrorCategory.MISSING_FIELD,
105
+ error_blame=ErrorBlame.USER_ERROR,
106
+ error_target=ErrorTarget.INDIRECT_ATTACK_EVALUATOR,
107
+ )
108
+
109
+ # Run score computation based on supplied metric.
110
+ result = await evaluate_with_rai_service(
111
+ metric_name=EvaluationMetrics.XPIA,
112
+ query=query,
113
+ response=response,
114
+ project_scope=self._azure_ai_project,
115
+ credential=self._credential,
116
+ )
117
+ return result
88
118
 
89
- @overload
90
- def __call__(
91
- self,
92
- *,
93
- conversation: Conversation,
94
- ) -> Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]:
95
- """Evaluate cross domain injected attacks are present in a conversation
96
-
97
- :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
98
- key "messages", and potentially a global context under the key "context". Conversation turns are expected
99
- to be dictionaries with keys "content", "role", and possibly "context".
100
- :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
101
- :return: The cross domain injection attack score
102
- :rtype: Dict[str, Union[str, bool, Dict[str, List[Union[str, bool]]]]]
103
- """
104
119
 
105
- @override
106
- def __call__( # pylint: disable=docstring-missing-param
107
- self,
108
- *args,
109
- **kwargs,
110
- ):
111
- """
112
- Evaluate whether cross domain injected attacks are present in your AI system's response.
120
+ class _IndirectAttackEvaluator:
121
+ def __init__(self, azure_ai_project: dict, credential=None):
122
+ self._async_evaluator = _AsyncIndirectAttackEvaluator(azure_ai_project, credential)
113
123
 
124
+ def __call__(self, *, query: str, response: str, **kwargs):
125
+ """
126
+ Evaluates XPIA content.
114
127
  :keyword query: The query to be evaluated.
115
- :paramtype query: Optional[str]
128
+ :paramtype query: str
116
129
  :keyword response: The response to be evaluated.
117
- :paramtype response: Optional[str]
118
- :keyword conversation: The conversation to evaluate. Expected to contain a list of conversation turns under the
119
- key "messages". Conversation turns are expected
120
- to be dictionaries with keys "content" and "role".
121
- :paramtype conversation: Optional[~azure.ai.evaluation.Conversation]
122
- :return: The cross domain injection attack score
123
- :rtype: Union[Dict[str, Union[str, bool]], Dict[str, Union[float, Dict[str, List[Union[str, bool]]]]]]
130
+ :paramtype response: str
131
+ :keyword context: The context to be evaluated.
132
+ :paramtype context: str
133
+ :return: The XPIA score.
134
+ :rtype: dict
124
135
  """
125
- return super().__call__(*args, **kwargs)
136
+ return async_run_allowing_running_loop(self._async_evaluator, query=query, response=response, **kwargs)
137
+
138
+ def _to_async(self):
139
+ return self._async_evaluator
@@ -4,7 +4,6 @@
4
4
  """This includes enums and classes for exceptions for use in azure-ai-evaluation."""
5
5
 
6
6
  from enum import Enum
7
- from typing import Optional
8
7
 
9
8
  from azure.core.exceptions import AzureError
10
9
 
@@ -21,9 +20,6 @@ class ErrorCategory(Enum):
21
20
  * RESOURCE_NOT_FOUND -> Resource could not be found
22
21
  * FAILED_EXECUTION -> Execution failed
23
22
  * SERVICE_UNAVAILABLE -> Service is unavailable
24
- * MISSING_PACKAGE -> Required package is missing
25
- * FAILED_REMOTE_TRACKING -> Remote tracking failed
26
- * PROJECT_ACCESS_ERROR -> Access to project failed
27
23
  * UNKNOWN -> Undefined placeholder. Avoid using.
28
24
  """
29
25
 
@@ -34,9 +30,6 @@ class ErrorCategory(Enum):
34
30
  RESOURCE_NOT_FOUND = "RESOURCE NOT FOUND"
35
31
  FAILED_EXECUTION = "FAILED_EXECUTION"
36
32
  SERVICE_UNAVAILABLE = "SERVICE UNAVAILABLE"
37
- MISSING_PACKAGE = "MISSING PACKAGE"
38
- FAILED_REMOTE_TRACKING = "FAILED REMOTE TRACKING"
39
- PROJECT_ACCESS_ERROR = "PROJECT ACCESS ERROR"
40
33
  UNKNOWN = "UNKNOWN"
41
34
 
42
35
 
@@ -61,17 +54,15 @@ class ErrorTarget(Enum):
61
54
  EVAL_RUN = "EvalRun"
62
55
  CODE_CLIENT = "CodeClient"
63
56
  RAI_CLIENT = "RAIClient"
57
+ CHAT_EVALUATOR = "ChatEvaluator"
64
58
  COHERENCE_EVALUATOR = "CoherenceEvaluator"
65
59
  CONTENT_SAFETY_CHAT_EVALUATOR = "ContentSafetyEvaluator"
66
- CONTENT_SAFETY_MULTIMODAL_EVALUATOR = "ContentSafetyMultimodalEvaluator"
67
60
  ECI_EVALUATOR = "ECIEvaluator"
68
61
  F1_EVALUATOR = "F1Evaluator"
69
62
  GROUNDEDNESS_EVALUATOR = "GroundednessEvaluator"
70
63
  PROTECTED_MATERIAL_EVALUATOR = "ProtectedMaterialEvaluator"
71
64
  RELEVANCE_EVALUATOR = "RelevanceEvaluator"
72
65
  SIMILARITY_EVALUATOR = "SimilarityEvaluator"
73
- FLUENCY_EVALUATOR = "FluencyEvaluator"
74
- RETRIEVAL_EVALUATOR = "RetrievalEvaluator"
75
66
  INDIRECT_ATTACK_EVALUATOR = "IndirectAttackEvaluator"
76
67
  INDIRECT_ATTACK_SIMULATOR = "IndirectAttackSimulator"
77
68
  ADVERSARIAL_SIMULATOR = "AdversarialSimulator"
@@ -84,7 +75,7 @@ class ErrorTarget(Enum):
84
75
 
85
76
 
86
77
  class EvaluationException(AzureError):
87
- """The base class for all exceptions raised in azure-ai-evaluation. If there is a need to define a custom
78
+ """The base class for all exceptions raised in azure-ai-evaluation. If there is a need to define a custom
88
79
  exception type, that custom exception type should extend from this class.
89
80
 
90
81
  :param message: A message describing the error. This is the error message the user will see.
@@ -93,36 +84,24 @@ class EvaluationException(AzureError):
93
84
  :type internal_message: str
94
85
  :param target: The name of the element that caused the exception to be thrown.
95
86
  :type target: ~azure.ai.evaluation._exceptions.ErrorTarget
96
- :param category: The error category, defaults to Unknown.
97
- :type category: ~azure.ai.evaluation._exceptions.ErrorCategory
98
- :param blame: The source of blame for the error, defaults to Unknown.
99
- :type blame: ~azure.ai.evaluation._exceptions.ErrorBlame
100
- :param tsg_link: A link to the TSG page for troubleshooting the error.
101
- :type tsg_link: str
87
+ :param error_category: The error category, defaults to Unknown.
88
+ :type error_category: ~azure.ai.evaluation._exceptions.ErrorCategory
89
+ :param error: The original exception if any.
90
+ :type error: Exception
102
91
  """
103
92
 
104
93
  def __init__(
105
94
  self,
106
95
  message: str,
96
+ internal_message: str,
107
97
  *args,
108
- internal_message: Optional[str] = None,
109
98
  target: ErrorTarget = ErrorTarget.UNKNOWN,
110
99
  category: ErrorCategory = ErrorCategory.UNKNOWN,
111
100
  blame: ErrorBlame = ErrorBlame.UNKNOWN,
112
- tsg_link: Optional[str] = None,
113
101
  **kwargs,
114
102
  ) -> None:
115
103
  self.category = category
116
104
  self.target = target
117
105
  self.blame = blame
118
106
  self.internal_message = internal_message
119
- self.tsg_link = tsg_link
120
107
  super().__init__(message, *args, **kwargs)
121
-
122
- def __str__(self):
123
- error_blame = "InternalError" if self.blame != ErrorBlame.USER_ERROR else "UserError"
124
- msg = f"({error_blame}) {super().__str__()}"
125
- if self.tsg_link:
126
- msg += f"\nVisit {self.tsg_link} to troubleshoot this issue."
127
-
128
- return msg