validmind 2.2.5__py3-none-any.whl → 2.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. validmind/__version__.py +1 -1
  2. validmind/{ai.py → ai/test_descriptions.py} +127 -69
  3. validmind/ai/utils.py +104 -0
  4. validmind/api_client.py +70 -31
  5. validmind/client.py +5 -5
  6. validmind/logging.py +38 -32
  7. validmind/models/foundation.py +10 -6
  8. validmind/models/function.py +3 -1
  9. validmind/models/metadata.py +1 -1
  10. validmind/test_suites/__init__.py +1 -7
  11. validmind/test_suites/regression.py +0 -16
  12. validmind/test_suites/statsmodels_timeseries.py +1 -1
  13. validmind/tests/data_validation/ACFandPACFPlot.py +36 -27
  14. validmind/tests/{model_validation/statsmodels → data_validation}/ADF.py +42 -13
  15. validmind/tests/data_validation/BivariateScatterPlots.py +38 -41
  16. validmind/tests/{model_validation/statsmodels → data_validation}/DFGLSArch.py +67 -11
  17. validmind/tests/data_validation/HeatmapFeatureCorrelations.py +1 -1
  18. validmind/tests/data_validation/HighPearsonCorrelation.py +12 -3
  19. validmind/tests/data_validation/IsolationForestOutliers.py +2 -2
  20. validmind/tests/{model_validation/statsmodels → data_validation}/KPSS.py +64 -11
  21. validmind/tests/{model_validation/statsmodels → data_validation}/PhillipsPerronArch.py +65 -11
  22. validmind/tests/data_validation/ScatterPlot.py +1 -1
  23. validmind/tests/data_validation/SeasonalDecompose.py +12 -7
  24. validmind/tests/data_validation/TabularDateTimeHistograms.py +29 -33
  25. validmind/tests/data_validation/WOEBinPlots.py +1 -1
  26. validmind/tests/data_validation/WOEBinTable.py +1 -1
  27. validmind/tests/{model_validation/statsmodels → data_validation}/ZivotAndrewsArch.py +65 -11
  28. validmind/tests/data_validation/nlp/CommonWords.py +1 -1
  29. validmind/tests/data_validation/nlp/Hashtags.py +1 -1
  30. validmind/tests/data_validation/nlp/Mentions.py +1 -1
  31. validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py +2 -1
  32. validmind/tests/data_validation/nlp/Punctuations.py +1 -1
  33. validmind/tests/data_validation/nlp/Sentiment.py +1 -1
  34. validmind/tests/data_validation/nlp/TextDescription.py +5 -1
  35. validmind/tests/data_validation/nlp/Toxicity.py +1 -1
  36. validmind/tests/decorator.py +1 -1
  37. validmind/tests/model_validation/FeaturesAUC.py +5 -3
  38. validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py +4 -0
  39. validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py +4 -0
  40. validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py +4 -0
  41. validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py +4 -0
  42. validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py +4 -0
  43. validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py +4 -0
  44. validmind/tests/model_validation/ragas/AnswerCorrectness.py +3 -3
  45. validmind/tests/model_validation/ragas/AnswerRelevance.py +5 -4
  46. validmind/tests/model_validation/ragas/AnswerSimilarity.py +5 -4
  47. validmind/tests/model_validation/ragas/AspectCritique.py +14 -8
  48. validmind/tests/model_validation/ragas/ContextEntityRecall.py +3 -4
  49. validmind/tests/model_validation/ragas/ContextPrecision.py +4 -5
  50. validmind/tests/model_validation/ragas/ContextRecall.py +3 -4
  51. validmind/tests/model_validation/ragas/ContextRelevancy.py +5 -4
  52. validmind/tests/model_validation/ragas/Faithfulness.py +6 -5
  53. validmind/tests/model_validation/ragas/utils.py +35 -9
  54. validmind/tests/model_validation/sklearn/ClusterPerformance.py +2 -2
  55. validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py +1 -1
  56. validmind/tests/model_validation/sklearn/ModelsPerformanceComparison.py +6 -8
  57. validmind/tests/model_validation/sklearn/RegressionErrors.py +1 -1
  58. validmind/tests/model_validation/sklearn/RegressionModelsPerformanceComparison.py +14 -8
  59. validmind/tests/model_validation/sklearn/RegressionR2Square.py +1 -1
  60. validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py +1 -1
  61. validmind/tests/model_validation/statsmodels/GINITable.py +1 -1
  62. validmind/tests/model_validation/statsmodels/JarqueBera.py +1 -1
  63. validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py +1 -1
  64. validmind/tests/model_validation/statsmodels/LJungBox.py +1 -1
  65. validmind/tests/model_validation/statsmodels/Lilliefors.py +1 -1
  66. validmind/tests/model_validation/statsmodels/RegressionCoeffsPlot.py +4 -0
  67. validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py +9 -4
  68. validmind/tests/model_validation/statsmodels/RegressionModelsCoeffs.py +2 -2
  69. validmind/tests/model_validation/statsmodels/RunsTest.py +1 -1
  70. validmind/tests/model_validation/statsmodels/ShapiroWilk.py +1 -1
  71. validmind/tests/prompt_validation/Bias.py +14 -11
  72. validmind/tests/prompt_validation/Clarity.py +14 -11
  73. validmind/tests/prompt_validation/Conciseness.py +14 -11
  74. validmind/tests/prompt_validation/Delimitation.py +14 -11
  75. validmind/tests/prompt_validation/NegativeInstruction.py +14 -11
  76. validmind/tests/prompt_validation/Robustness.py +11 -11
  77. validmind/tests/prompt_validation/Specificity.py +14 -11
  78. validmind/tests/prompt_validation/ai_powered_test.py +53 -75
  79. validmind/unit_metrics/composite.py +2 -1
  80. validmind/utils.py +4 -49
  81. validmind/vm_models/dataset/dataset.py +17 -3
  82. validmind/vm_models/dataset/utils.py +2 -2
  83. validmind/vm_models/model.py +1 -1
  84. validmind/vm_models/test/metric.py +1 -8
  85. validmind/vm_models/test/result_wrapper.py +27 -34
  86. validmind/vm_models/test/test.py +3 -0
  87. validmind/vm_models/test/threshold_test.py +1 -1
  88. validmind/vm_models/test_suite/runner.py +12 -6
  89. validmind/vm_models/test_suite/summary.py +18 -7
  90. validmind/vm_models/test_suite/test.py +13 -20
  91. {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/METADATA +1 -1
  92. {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/RECORD +95 -104
  93. validmind/tests/data_validation/DefaultRatesbyRiskBandPlot.py +0 -114
  94. validmind/tests/data_validation/PiTCreditScoresHistogram.py +0 -150
  95. validmind/tests/data_validation/PiTPDHistogram.py +0 -152
  96. validmind/tests/model_validation/statsmodels/ADFTest.py +0 -88
  97. validmind/tests/model_validation/statsmodels/FeatureImportanceAndSignificance.py +0 -198
  98. validmind/tests/model_validation/statsmodels/PDRatingClassPlot.py +0 -151
  99. validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py +0 -146
  100. validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py +0 -144
  101. validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py +0 -127
  102. validmind/tests/model_validation/statsmodels/ResidualsVisualInspection.py +0 -130
  103. {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/LICENSE +0 -0
  104. {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/WHEEL +0 -0
  105. {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/entry_points.txt +0 -0
@@ -195,7 +195,19 @@ class VMDataset:
         probability_column: str = None,
         probability_values: list = None,
         prediction_probabilities: list = None,  # DEPRECATED: use probability_values
+        **kwargs,
     ):
+        """Assign predictions and probabilities to the dataset.
+
+        Args:
+            model (VMModel): The model used to generate the predictions.
+            prediction_column (str, optional): The name of the column containing the predictions. Defaults to None.
+            prediction_values (list, optional): The values of the predictions. Defaults to None.
+            probability_column (str, optional): The name of the column containing the probabilities. Defaults to None.
+            probability_values (list, optional): The values of the probabilities. Defaults to None.
+            prediction_probabilities (list, optional): DEPRECATED: The values of the probabilities. Defaults to None.
+            kwargs: Additional keyword arguments that will get passed through to the model's `predict` method.
+        """
         if prediction_probabilities is not None:
             warnings.warn(
                 "The `prediction_probabilities` argument is deprecated. Use `probability_values` instead.",
@@ -226,7 +238,9 @@ class VMDataset:
 
         if prediction_values is None:
             X = self.df if isinstance(model, (FunctionModel, PipelineModel)) else self.x
-            probability_values, prediction_values = compute_predictions(model, X)
+            probability_values, prediction_values = compute_predictions(
+                model, X, **kwargs
+            )
 
         prediction_column = prediction_column or f"{model.input_id}_prediction"
         self._add_column(prediction_column, prediction_values)
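
Note: together with the `compute_predictions` changes further down, these two hunks let `assign_predictions` forward arbitrary keyword arguments to the model's `predict` method. A minimal usage sketch, assuming the usual `vm.init_model` / `vm.init_dataset` setup; the toy data, input IDs, and the commented-out keyword are illustrative and not taken from this diff:

import pandas as pd
import validmind as vm
from sklearn.linear_model import LogisticRegression

# toy data and model, purely for illustration
df = pd.DataFrame({"x1": [0, 1, 2, 3], "x2": [1, 0, 1, 0], "target": [0, 0, 1, 1]})
model = LogisticRegression().fit(df[["x1", "x2"]], df["target"])

vm_model = vm.init_model(model, input_id="my_model")
vm_ds = vm.init_dataset(dataset=df, input_id="my_dataset", target_column="target")

# extra keyword arguments are now forwarded to model.predict(...) via **kwargs;
# sklearn's predict() takes none, but e.g. a generative model's might:
# vm_ds.assign_predictions(model=vm_model, generation_config={...})  # hypothetical keyword
vm_ds.assign_predictions(model=vm_model)
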
@@ -356,8 +370,8 @@ class VMDataset:
         return as_df(self.df[self.probability_column(model)])
 
     def target_classes(self):
-        """Returns the unique number of target classes for the target (Y) variable"""
-        return [str(i) for i in np.unique(self.y)]
+        """Returns the target class labels or unique values of the target column."""
+        return self.target_class_labels or [str(i) for i in np.unique(self.y)]
 
     def __str__(self):
         return (
@@ -94,7 +94,7 @@ def _is_probabilties(output):
     return np.all((output >= 0) & (output <= 1)) and np.any((output > 0) & (output < 1))
 
 
-def compute_predictions(model, X) -> tuple:
+def compute_predictions(model, X, **kwargs) -> tuple:
     probability_values = None
 
     try:
@@ -108,7 +108,7 @@ def compute_predictions(model, X) -> tuple:
 
     try:
         logger.info("Running predict()... This may take a while")
-        prediction_values = model.predict(X)
+        prediction_values = model.predict(X, **kwargs)
         logger.info("Done running predict()")
     except MissingOrInvalidModelPredictFnError:
         raise MissingOrInvalidModelPredictFnError(
@@ -114,7 +114,7 @@ class VMModel(ABC):
 
         self.__post_init__()
 
-    def __post_init__(self):
+    def __post_init__(self):  # noqa: B027
         """Allows child classes to add their own post-init logic"""
         pass
 
@@ -12,8 +12,8 @@ from typing import ClassVar, List, Optional, Union
 
 import pandas as pd
 
+from ...ai.test_descriptions import get_description_metadata
 from ...errors import MissingCacheResultsArgumentsError
-from ...utils import get_description_metadata
 from ..figure import Figure
 from .metric_result import MetricResult
 from .result_wrapper import MetricResultWrapper
@@ -36,13 +36,6 @@ class Metric(Test):
     # Instance Variables
     result: MetricResultWrapper = None  # populated by cache_results() method
 
-    @property
-    def key(self):
-        """
-        Keep the key for compatibility reasons
-        """
-        return self._key if hasattr(self, "_key") else self.name
-
     @abstractmethod
     def summary(self, metric_value: Optional[Union[dict, list, pd.DataFrame]] = None):
         """
@@ -7,7 +7,6 @@ Result Wrappers for test and metric results
 """
 import asyncio
 import json
-import os
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import Dict, List, Optional, Union
@@ -16,10 +15,10 @@ import pandas as pd
 from ipywidgets import HTML, GridBox, Layout, VBox
 
 from ... import api_client
-from ...ai import DescriptionFuture
+from ...ai.test_descriptions import AI_REVISION_NAME, DescriptionFuture
 from ...input_registry import input_registry
 from ...logging import get_logger
-from ...utils import NumpyEncoder, display, md_to_html, run_async, test_id_to_name
+from ...utils import NumpyEncoder, display, run_async, test_id_to_name
 from ..dataset import VMDataset
 from ..figure import Figure
 from .metric_result import MetricResult
@@ -31,31 +30,35 @@ logger = get_logger(__name__)
 
 
 async def update_metadata(content_id: str, text: str, _json: Union[Dict, List] = None):
-    """
-    Update the metadata of a content item. By default we don't
-    override the existing metadata, but we can override it by
-    setting the VM_OVERRIDE_METADATA environment variable to True
-    """
-    should_update = False
-
-    # check if the env variable is set to force overwriting metadata
-    if os.environ.get("VM_OVERRIDE_METADATA", "false").lower() == "true":
-        should_update = True
+    """Create or Update a Metadata Object"""
+    parts = content_id.split("::")
+    content_id = parts[0]
+    revision_name = parts[1] if len(parts) > 1 else None
 
-    # if not set, check if the content_id is a composite metric def
-    if not should_update and content_id.startswith("composite_metric_def:"):
-        # we always want composite metric definitions to be updated
-        should_update = True
+    # we always want composite metric definitions to be updated
+    should_update = content_id.startswith("composite_metric_def:")
 
-    # if not set, lets check if the metadata already exists
-    if not should_update:
+    # if we are updating a metric or test description, we check if the text
+    # has changed from the last time it was logged, and only update if it has
+    if content_id.split(":", 1)[0] in ["metric_description", "test_description"]:
         try:
-            await api_client.get_metadata(content_id)
-        except Exception:  # TODO: this shouldn't be a catch-all
-            # if the metadata doesn't exist, we should create (update) it
+            md = await api_client.get_metadata(content_id)
+            # if there is an existing description, only update it if the new one
+            # is different and is an AI-generated description
+            should_update = (
+                md["text"] != text if revision_name == AI_REVISION_NAME else False
+            )
+            logger.debug(f"Check if description has changed: {should_update}")
+        except Exception:
+            # if exception, assume its not created yet TODO: don't catch all
            should_update = True
 
     if should_update:
+        if revision_name:
+            content_id = f"{content_id}::{revision_name}"
+
+        logger.debug(f"Updating metadata for `{content_id}`")
+
         await api_client.log_metadata(content_id, text, _json)
 
 
63
 
61
64
 
@@ -102,12 +105,6 @@ class ResultWrapper(ABC):
 
         return self.to_widget()
 
-    def _markdown_description_to_html(self, description: str):
-        """
-        Convert a markdown string to html
-        """
-        return md_to_html(description)
-
     def _summary_tables_to_widget(self, summary: ResultSummary):
         """
         Create an ipywdiget representation of the summary tables
@@ -277,9 +274,7 @@ class MetricResultWrapper(ResultWrapper):
             metric_description = metric_description.get_description()
             self.result_metadata[0]["text"] = metric_description
 
-        vbox_children.append(
-            HTML(value=self._markdown_description_to_html(metric_description))
-        )
+        vbox_children.append(HTML(value=metric_description))
 
         if self.metric:
             if self.output_template:
@@ -464,9 +459,7 @@ class ThresholdTestResultWrapper(ResultWrapper):
             metric_description = metric_description.get_description()
             self.result_metadata[0]["text"] = metric_description
 
-        description_html.append(
-            self._markdown_description_to_html(metric_description)
-        )
+        description_html.append(metric_description)
 
         description_html.append(
             f"""
@@ -52,6 +52,9 @@ class Test(TestUtils):
                 "test_id is missing. It must be passed when initializing the test"
             )
         self._ref_id = str(uuid4())
+        self.key = (
+            self.test_id
+        )  # for backwards compatibility - figures really should get keyed automatically
 
         # TODO: add validation for required inputs
         if self.default_params is None:
@@ -11,7 +11,7 @@ avoid confusion with the "tests" in the general data science/modeling sense.
 from dataclasses import dataclass
 from typing import ClassVar, List, Optional
 
-from ...utils import get_description_metadata
+from ...ai.test_descriptions import get_description_metadata
 from ..figure import Figure
 from .result_summary import ResultSummary, ResultTable
 from .result_wrapper import ThresholdTestResultWrapper
@@ -83,11 +83,14 @@ class TestSuiteRunner:
                 test_configs = test_configs.get("params", {})
             else:
                 if (test_configs) and ("params" not in test_configs):
-                    """[DEPRECATED] Deprecated method for setting test parameters directly in the 'config' parameter"""
-                    logger.info(
-                        "Setting test parameters directly in the 'config' parameter of the run_documentation_tests() method is deprecated. "
-                        'Instead, use the new format of the config: config = {"test_id": {"params": {...}, "inputs": {...}}}'
+                    # [DEPRECATED] This is the old way of setting test parameters
+                    msg = (
+                        "Setting test parameters directly in the 'config' parameter"
+                        " of the run_documentation_tests() method is deprecated. "
+                        "Instead, use the new format of the config: "
+                        'config = {"test_id": {"params": {...}, "inputs": {...}}}'
                     )
+                    logger.warning(msg)
 
             test.load(inputs=inputs, context=self.context, config=test_configs)
 
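
Note: the deprecation notice is now emitted via `logger.warning` and spells out the replacement config shape. For reference, a sketch of the new-style config; the test ID, parameter, and input names are placeholders:

import validmind as vm

# new-style config: per-test "params" and "inputs" blocks instead of bare params
config = {
    "validmind.data_validation.HighPearsonCorrelation": {
        "params": {"max_threshold": 0.5},      # placeholder parameter
        "inputs": {"dataset": "raw_dataset"},  # placeholder input_id
    },
}

vm.run_documentation_tests(config=config)
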
@@ -145,14 +148,17 @@ class TestSuiteRunner:
 
         await asyncio.sleep(0.5)
 
-    def summarize(self):
+    def summarize(self, show_link: bool = True):
         if not is_notebook():
             return logger.info("Test suite done...")
 
+        self.pbar_description.value = "Collecting test results..."
+
         summary = TestSuiteSummary(
             title=self.suite.title,
             description=self.suite.description,
             sections=self.suite.sections,
+            show_link=show_link,
         )
         summary.display()
 
@@ -181,6 +187,6 @@ class TestSuiteRunner:
         run_async(self.log_results)
         run_async_check(self._check_progress)
 
-        self.summarize()
+        self.summarize(show_link=send)
 
         self._stop_progress_bar()
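
Note: `summarize()` now accepts `show_link`, and `run()` ties it to the `send` flag, so the results link is only rendered when results were actually logged to the platform. Seen from the caller's side, assuming `run_documentation_tests` still forwards its `send` flag to the suite runner; the input IDs are placeholders:

import validmind as vm

# with send=False nothing is logged to the ValidMind platform, and the summary
# widget now omits the results link instead of pointing at results that were never sent
vm.run_documentation_tests(
    inputs={"dataset": "raw_dataset", "model": "my_model"},  # placeholder input IDs
    send=False,
)
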
@@ -35,8 +35,14 @@ class TestSuiteSectionSummary:
         self._build_summary()
 
     def _add_description(self):
-        description = f'<div class="result">{md_to_html(self.description)}</div>'
-        self._widgets.append(widgets.HTML(value=description))
+        if not self.description:
+            return
+
+        self._widgets.append(
+            widgets.HTML(
+                value=f'<div class="result">{md_to_html(self.description)}</div>'
+            )
+        )
 
     def _add_tests_summary(self):
         children = []
@@ -45,9 +51,9 @@ class TestSuiteSectionSummary:
         for test in self.tests:
             children.append(test.result.to_widget())
             titles.append(
-                f"❌ {test.result.name}: {test.title} ({test.test_id})"
+                f"❌ {test.result.name}: {test.name} ({test.test_id})"
                 if isinstance(test.result, FailedResultWrapper)
-                else f"{test.result.name}: {test.title} ({test.test_id})"
+                else f"{test.result.name}: {test.name} ({test.test_id})"
             )
 
         self._widgets.append(widgets.Accordion(children=children, titles=titles))
@@ -71,6 +77,7 @@ class TestSuiteSummary:
     title: str
     description: str
     sections: List[TestSuiteSection]
+    show_link: bool = True
 
     _widgets: List[widgets.Widget] = None
 
@@ -100,8 +107,11 @@ class TestSuiteSummary:
         self._widgets.append(widgets.HTML(value=results_link))
 
     def _add_description(self):
-        description = f'<div class="result">{md_to_html(self.description)}</div>'
-        self._widgets.append(widgets.HTML(value=description))
+        self._widgets.append(
+            widgets.HTML(
+                value=f'<div class="result">{md_to_html(self.description)}</div>'
+            )
+        )
 
     def _add_sections_summary(self):
         children = []
@@ -145,7 +155,8 @@ class TestSuiteSummary:
         self._widgets = []
 
         self._add_title()
-        self._add_results_link()
+        if self.show_link:
+            self._add_results_link()
         self._add_description()
         if len(self.sections) == 1:
             self._add_top_level_section_summary()
@@ -21,6 +21,7 @@ class TestSuiteTest:
 
     test_id: str
     output_template: str = None
+    name: str = None
 
     _test_class: Test = None
     _test_instance: Test = None
@@ -39,6 +40,8 @@ class TestSuiteTest:
             self.test_id = test_id_or_obj["id"]
             self.output_template = test_id_or_obj.get("output_template")
 
+        self.name = test_id_to_name(self.test_id)
+
         try:
             self._test_class = load_test_class(self.test_id)
         except LoadTestError as e:
@@ -52,14 +55,6 @@ class TestSuiteTest:
             # since _test_class is None
             logger.error(f"Failed to load test '{self.test_id}': {e}")
 
-    @property
-    def title(self):
-        return test_id_to_name(self.test_id)
-
-    @property
-    def name(self):
-        return self._test_class.name
-
     @property
     def test_type(self):
         return self._test_class.test_type
@@ -86,12 +81,12 @@ class TestSuiteTest:
             )
         except Exception as e:
             logger.error(
-                f"Failed to load test '{self._test_class.name}': "
+                f"Failed to load test '{self.test_id}': "
                 f"({e.__class__.__name__}) {e}"
             )
             self.result = FailedResultWrapper(
                 error=e,
-                message=f"Failed to load test '{self.test_id}'",
+                message=f"Failed to load test '{self.name}'",
                 result_id=self.test_id,
             )
 
@@ -107,7 +102,7 @@ class TestSuiteTest:
                 # run the test and log the performance if LOG_LEVEL is set to DEBUG
                 log_performance(
                     func=self._test_instance.run,
-                    name=self._test_instance.name,
+                    name=self.test_id,
                     logger=logger,
                 )()  # this is a decorator so we need to call it
 
@@ -116,14 +111,13 @@ class TestSuiteTest:
                 raise e  # Re-raise the exception if we are in fail fast mode
 
             logger.error(
-                f"Failed to run test '{self._test_instance.name}': "
-                f"({e.__class__.__name__}) {e}"
+                f"Failed to run test '{self.test_id}': " f"({e.__class__.__name__}) {e}"
             )
             self.result = FailedResultWrapper(
                 name=f"Failed {self._test_instance.test_type}",
                 error=e,
-                message=f"Failed to run '{self._test_instance.name}'",
-                result_id=self._test_instance.name,
+                message=f"Failed to run '{self.name}'",
+                result_id=self.test_id,
             )
 
             return
@@ -132,8 +126,8 @@ class TestSuiteTest:
             self.result = FailedResultWrapper(
                 name=f"Failed {self._test_instance.test_type}",
                 error=None,
-                message=f"'{self._test_instance.name}' did not return a result",
-                result_id=self._test_instance.name,
+                message=f"'{self.name}' did not return a result",
+                result_id=self.test_id,
             )
 
             return
@@ -142,9 +136,8 @@ class TestSuiteTest:
            self.result = FailedResultWrapper(
                 name=f"Failed {self._test_instance.test_type}",
                 error=None,
-                message=f"'{self._test_instance.name}' returned an invalid result: "
-                f"{self._test_instance.result}",
-                result_id=self._test_instance.name,
+                message=f"{self.name} returned an invalid result: {self._test_instance.result}",
+                result_id=self.test_id,
             )
 
             return
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: validmind
-Version: 2.2.5
+Version: 2.3.1
 Summary: ValidMind Developer Framework
 License: Commercial License
 Author: Andres Rodriguez