validmind 2.2.5__py3-none-any.whl → 2.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- validmind/__version__.py +1 -1
- validmind/{ai.py → ai/test_descriptions.py} +127 -69
- validmind/ai/utils.py +104 -0
- validmind/api_client.py +70 -31
- validmind/client.py +5 -5
- validmind/logging.py +38 -32
- validmind/models/foundation.py +10 -6
- validmind/models/function.py +3 -1
- validmind/models/metadata.py +1 -1
- validmind/test_suites/__init__.py +1 -7
- validmind/test_suites/regression.py +0 -16
- validmind/test_suites/statsmodels_timeseries.py +1 -1
- validmind/tests/data_validation/ACFandPACFPlot.py +36 -27
- validmind/tests/{model_validation/statsmodels → data_validation}/ADF.py +42 -13
- validmind/tests/data_validation/BivariateScatterPlots.py +38 -41
- validmind/tests/{model_validation/statsmodels → data_validation}/DFGLSArch.py +67 -11
- validmind/tests/data_validation/HeatmapFeatureCorrelations.py +1 -1
- validmind/tests/data_validation/HighPearsonCorrelation.py +12 -3
- validmind/tests/data_validation/IsolationForestOutliers.py +2 -2
- validmind/tests/{model_validation/statsmodels → data_validation}/KPSS.py +64 -11
- validmind/tests/{model_validation/statsmodels → data_validation}/PhillipsPerronArch.py +65 -11
- validmind/tests/data_validation/ScatterPlot.py +1 -1
- validmind/tests/data_validation/SeasonalDecompose.py +12 -7
- validmind/tests/data_validation/TabularDateTimeHistograms.py +29 -33
- validmind/tests/data_validation/WOEBinPlots.py +1 -1
- validmind/tests/data_validation/WOEBinTable.py +1 -1
- validmind/tests/{model_validation/statsmodels → data_validation}/ZivotAndrewsArch.py +65 -11
- validmind/tests/data_validation/nlp/CommonWords.py +1 -1
- validmind/tests/data_validation/nlp/Hashtags.py +1 -1
- validmind/tests/data_validation/nlp/Mentions.py +1 -1
- validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py +2 -1
- validmind/tests/data_validation/nlp/Punctuations.py +1 -1
- validmind/tests/data_validation/nlp/Sentiment.py +1 -1
- validmind/tests/data_validation/nlp/TextDescription.py +5 -1
- validmind/tests/data_validation/nlp/Toxicity.py +1 -1
- validmind/tests/decorator.py +1 -1
- validmind/tests/model_validation/FeaturesAUC.py +5 -3
- validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py +4 -0
- validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py +4 -0
- validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py +4 -0
- validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py +4 -0
- validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py +4 -0
- validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py +4 -0
- validmind/tests/model_validation/ragas/AnswerCorrectness.py +3 -3
- validmind/tests/model_validation/ragas/AnswerRelevance.py +5 -4
- validmind/tests/model_validation/ragas/AnswerSimilarity.py +5 -4
- validmind/tests/model_validation/ragas/AspectCritique.py +14 -8
- validmind/tests/model_validation/ragas/ContextEntityRecall.py +3 -4
- validmind/tests/model_validation/ragas/ContextPrecision.py +4 -5
- validmind/tests/model_validation/ragas/ContextRecall.py +3 -4
- validmind/tests/model_validation/ragas/ContextRelevancy.py +5 -4
- validmind/tests/model_validation/ragas/Faithfulness.py +6 -5
- validmind/tests/model_validation/ragas/utils.py +35 -9
- validmind/tests/model_validation/sklearn/ClusterPerformance.py +2 -2
- validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py +1 -1
- validmind/tests/model_validation/sklearn/ModelsPerformanceComparison.py +6 -8
- validmind/tests/model_validation/sklearn/RegressionErrors.py +1 -1
- validmind/tests/model_validation/sklearn/RegressionModelsPerformanceComparison.py +14 -8
- validmind/tests/model_validation/sklearn/RegressionR2Square.py +1 -1
- validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py +1 -1
- validmind/tests/model_validation/statsmodels/GINITable.py +1 -1
- validmind/tests/model_validation/statsmodels/JarqueBera.py +1 -1
- validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py +1 -1
- validmind/tests/model_validation/statsmodels/LJungBox.py +1 -1
- validmind/tests/model_validation/statsmodels/Lilliefors.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionCoeffsPlot.py +4 -0
- validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py +9 -4
- validmind/tests/model_validation/statsmodels/RegressionModelsCoeffs.py +2 -2
- validmind/tests/model_validation/statsmodels/RunsTest.py +1 -1
- validmind/tests/model_validation/statsmodels/ShapiroWilk.py +1 -1
- validmind/tests/prompt_validation/Bias.py +14 -11
- validmind/tests/prompt_validation/Clarity.py +14 -11
- validmind/tests/prompt_validation/Conciseness.py +14 -11
- validmind/tests/prompt_validation/Delimitation.py +14 -11
- validmind/tests/prompt_validation/NegativeInstruction.py +14 -11
- validmind/tests/prompt_validation/Robustness.py +11 -11
- validmind/tests/prompt_validation/Specificity.py +14 -11
- validmind/tests/prompt_validation/ai_powered_test.py +53 -75
- validmind/unit_metrics/composite.py +2 -1
- validmind/utils.py +4 -49
- validmind/vm_models/dataset/dataset.py +17 -3
- validmind/vm_models/dataset/utils.py +2 -2
- validmind/vm_models/model.py +1 -1
- validmind/vm_models/test/metric.py +1 -8
- validmind/vm_models/test/result_wrapper.py +27 -34
- validmind/vm_models/test/test.py +3 -0
- validmind/vm_models/test/threshold_test.py +1 -1
- validmind/vm_models/test_suite/runner.py +12 -6
- validmind/vm_models/test_suite/summary.py +18 -7
- validmind/vm_models/test_suite/test.py +13 -20
- {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/METADATA +1 -1
- {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/RECORD +95 -104
- validmind/tests/data_validation/DefaultRatesbyRiskBandPlot.py +0 -114
- validmind/tests/data_validation/PiTCreditScoresHistogram.py +0 -150
- validmind/tests/data_validation/PiTPDHistogram.py +0 -152
- validmind/tests/model_validation/statsmodels/ADFTest.py +0 -88
- validmind/tests/model_validation/statsmodels/FeatureImportanceAndSignificance.py +0 -198
- validmind/tests/model_validation/statsmodels/PDRatingClassPlot.py +0 -151
- validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py +0 -146
- validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py +0 -144
- validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py +0 -127
- validmind/tests/model_validation/statsmodels/ResidualsVisualInspection.py +0 -130
- {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/LICENSE +0 -0
- {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/WHEEL +0 -0
- {validmind-2.2.5.dist-info → validmind-2.3.1.dist-info}/entry_points.txt +0 -0
validmind/vm_models/dataset/dataset.py
CHANGED
@@ -195,7 +195,19 @@ class VMDataset:
         probability_column: str = None,
         probability_values: list = None,
         prediction_probabilities: list = None,  # DEPRECATED: use probability_values
+        **kwargs,
     ):
+        """Assign predictions and probabilities to the dataset.
+
+        Args:
+            model (VMModel): The model used to generate the predictions.
+            prediction_column (str, optional): The name of the column containing the predictions. Defaults to None.
+            prediction_values (list, optional): The values of the predictions. Defaults to None.
+            probability_column (str, optional): The name of the column containing the probabilities. Defaults to None.
+            probability_values (list, optional): The values of the probabilities. Defaults to None.
+            prediction_probabilities (list, optional): DEPRECATED: The values of the probabilities. Defaults to None.
+            kwargs: Additional keyword arguments that will get passed through to the model's `predict` method.
+        """
         if prediction_probabilities is not None:
             warnings.warn(
                 "The `prediction_probabilities` argument is deprecated. Use `probability_values` instead.",
@@ -226,7 +238,9 @@ class VMDataset:

         if prediction_values is None:
             X = self.df if isinstance(model, (FunctionModel, PipelineModel)) else self.x
-            probability_values, prediction_values = compute_predictions(model, X)
+            probability_values, prediction_values = compute_predictions(
+                model, X, **kwargs
+            )

         prediction_column = prediction_column or f"{model.input_id}_prediction"
         self._add_column(prediction_column, prediction_values)
@@ -356,8 +370,8 @@ class VMDataset:
         return as_df(self.df[self.probability_column(model)])

     def target_classes(self):
-        """Returns the
-        return [str(i) for i in np.unique(self.y)]
+        """Returns the target class labels or unique values of the target column."""
+        return self.target_class_labels or [str(i) for i in np.unique(self.y)]

     def __str__(self):
         return (
validmind/vm_models/dataset/utils.py
CHANGED
@@ -94,7 +94,7 @@ def _is_probabilties(output):
     return np.all((output >= 0) & (output <= 1)) and np.any((output > 0) & (output < 1))


-def compute_predictions(model, X) -> tuple:
+def compute_predictions(model, X, **kwargs) -> tuple:
     probability_values = None

     try:
@@ -108,7 +108,7 @@ def compute_predictions(model, X) -> tuple:

     try:
         logger.info("Running predict()... This may take a while")
-        prediction_values = model.predict(X)
+        prediction_values = model.predict(X, **kwargs)
         logger.info("Done running predict()")
     except MissingOrInvalidModelPredictFnError:
         raise MissingOrInvalidModelPredictFnError(
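Taken together, the two hunks above thread arbitrary keyword arguments from `assign_predictions()` down to the wrapped model's `predict()` call. A minimal usage sketch, assuming a model and DataFrame you have already trained and loaded (the `batch_size` keyword is purely illustrative; it only helps if the underlying model's `predict()` accepts it):

import validmind as vm

vm_model = vm.init_model(model, input_id="my_model")
vm_ds = vm.init_dataset(dataset=df, target_column="target", input_id="my_dataset")

# New in 2.3.1: extra keyword arguments are forwarded to model.predict()
vm_ds.assign_predictions(vm_model, batch_size=64)  # illustrative kwarg only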
validmind/vm_models/test/metric.py
CHANGED
@@ -12,8 +12,8 @@ from typing import ClassVar, List, Optional, Union

 import pandas as pd

+from ...ai.test_descriptions import get_description_metadata
 from ...errors import MissingCacheResultsArgumentsError
-from ...utils import get_description_metadata
 from ..figure import Figure
 from .metric_result import MetricResult
 from .result_wrapper import MetricResultWrapper
@@ -36,13 +36,6 @@ class Metric(Test):
     # Instance Variables
     result: MetricResultWrapper = None  # populated by cache_results() method

-    @property
-    def key(self):
-        """
-        Keep the key for compatibility reasons
-        """
-        return self._key if hasattr(self, "_key") else self.name
-
     @abstractmethod
     def summary(self, metric_value: Optional[Union[dict, list, pd.DataFrame]] = None):
         """
validmind/vm_models/test/result_wrapper.py
CHANGED
@@ -7,7 +7,6 @@ Result Wrappers for test and metric results
 """
 import asyncio
 import json
-import os
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import Dict, List, Optional, Union
@@ -16,10 +15,10 @@ import pandas as pd
 from ipywidgets import HTML, GridBox, Layout, VBox

 from ... import api_client
-from ...ai import DescriptionFuture
+from ...ai.test_descriptions import AI_REVISION_NAME, DescriptionFuture
 from ...input_registry import input_registry
 from ...logging import get_logger
-from ...utils import NumpyEncoder, display,
+from ...utils import NumpyEncoder, display, run_async, test_id_to_name
 from ..dataset import VMDataset
 from ..figure import Figure
 from .metric_result import MetricResult
@@ -31,31 +30,35 @@ logger = get_logger(__name__)


 async def update_metadata(content_id: str, text: str, _json: Union[Dict, List] = None):
-    """
-
-
-
-    """
-    should_update = False
-
-    # check if the env variable is set to force overwriting metadata
-    if os.environ.get("VM_OVERRIDE_METADATA", "false").lower() == "true":
-        should_update = True
+    """Create or Update a Metadata Object"""
+    parts = content_id.split("::")
+    content_id = parts[0]
+    revision_name = parts[1] if len(parts) > 1 else None

-    #
-
-    # we always want composite metric definitions to be updated
-    should_update = True
+    # we always want composite metric definitions to be updated
+    should_update = content_id.startswith("composite_metric_def:")

-    # if
-    if
+    # if we are updating a metric or test description, we check if the text
+    # has changed from the last time it was logged, and only update if it has
+    if content_id.split(":", 1)[0] in ["metric_description", "test_description"]:
         try:
-            await api_client.get_metadata(content_id)
-
-            #
+            md = await api_client.get_metadata(content_id)
+            # if there is an existing description, only update it if the new one
+            # is different and is an AI-generated description
+            should_update = (
+                md["text"] != text if revision_name == AI_REVISION_NAME else False
+            )
+            logger.debug(f"Check if description has changed: {should_update}")
+        except Exception:
+            # if exception, assume its not created yet TODO: don't catch all
            should_update = True

     if should_update:
+        if revision_name:
+            content_id = f"{content_id}::{revision_name}"
+
+        logger.debug(f"Updating metadata for `{content_id}`")
+
         await api_client.log_metadata(content_id, text, _json)
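The rewritten `update_metadata()` above splits an optional revision name off the content ID (everything after `::`) and only overwrites an existing description when the new text differs and carries the AI revision name. A small illustration of the ID convention, using a made-up content ID and revision name:

content_id = "metric_description:validmind.some.TestID::SomeRevision"  # hypothetical ID

parts = content_id.split("::")
base_id = parts[0]                                    # "metric_description:validmind.some.TestID"
revision_name = parts[1] if len(parts) > 1 else None  # "SomeRevision"

# When an update is needed, the revision is re-appended before logging:
logged_id = f"{base_id}::{revision_name}" if revision_name else base_id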
@@ -102,12 +105,6 @@ class ResultWrapper(ABC):

         return self.to_widget()

-    def _markdown_description_to_html(self, description: str):
-        """
-        Convert a markdown string to html
-        """
-        return md_to_html(description)
-
     def _summary_tables_to_widget(self, summary: ResultSummary):
         """
         Create an ipywdiget representation of the summary tables
@@ -277,9 +274,7 @@ class MetricResultWrapper(ResultWrapper):
             metric_description = metric_description.get_description()
             self.result_metadata[0]["text"] = metric_description

-        vbox_children.append(
-            HTML(value=self._markdown_description_to_html(metric_description))
-        )
+        vbox_children.append(HTML(value=metric_description))

         if self.metric:
             if self.output_template:
@@ -464,9 +459,7 @@ class ThresholdTestResultWrapper(ResultWrapper):
             metric_description = metric_description.get_description()
             self.result_metadata[0]["text"] = metric_description

-        description_html.append(
-            self._markdown_description_to_html(metric_description)
-        )
+        description_html.append(metric_description)

         description_html.append(
             f"""
validmind/vm_models/test/test.py
CHANGED
@@ -52,6 +52,9 @@ class Test(TestUtils):
                 "test_id is missing. It must be passed when initializing the test"
             )
         self._ref_id = str(uuid4())
+        self.key = (
+            self.test_id
+        )  # for backwards compatibility - figures really should get keyed automatically

         # TODO: add validation for required inputs
         if self.default_params is None:
validmind/vm_models/test/threshold_test.py
CHANGED
@@ -11,7 +11,7 @@ avoid confusion with the "tests" in the general data science/modeling sense.

 from dataclasses import dataclass
 from typing import ClassVar, List, Optional

-from ...utils import get_description_metadata
+from ...ai.test_descriptions import get_description_metadata
 from ..figure import Figure
 from .result_summary import ResultSummary, ResultTable
 from .result_wrapper import ThresholdTestResultWrapper
validmind/vm_models/test_suite/runner.py
CHANGED
@@ -83,11 +83,14 @@ class TestSuiteRunner:
                 test_configs = test_configs.get("params", {})
             else:
                 if (test_configs) and ("params" not in test_configs):
-
-
-                    "Setting test parameters directly in the 'config' parameter
-
+                    # [DEPRECATED] This is the old way of setting test parameters
+                    msg = (
+                        "Setting test parameters directly in the 'config' parameter"
+                        " of the run_documentation_tests() method is deprecated. "
+                        "Instead, use the new format of the config: "
+                        'config = {"test_id": {"params": {...}, "inputs": {...}}}'
                     )
+                    logger.warning(msg)

             test.load(inputs=inputs, context=self.context, config=test_configs)
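The deprecation warning above points to the new config layout, where each test ID maps to its own `params` and `inputs`. A hedged example of the shape passed to the `config` parameter of `run_documentation_tests()` (the test ID is taken from this release's test library, but the parameter value and input binding are illustrative):

import validmind as vm

config = {
    "validmind.data_validation.HighPearsonCorrelation": {
        "params": {"max_threshold": 0.5},        # illustrative parameter
        "inputs": {"dataset": "my_dataset_id"},  # illustrative input binding
    },
}

vm.run_documentation_tests(config=config)  # other arguments omitted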
@@ -145,14 +148,17 @@ class TestSuiteRunner:

         await asyncio.sleep(0.5)

-    def summarize(self):
+    def summarize(self, show_link: bool = True):
         if not is_notebook():
             return logger.info("Test suite done...")

+        self.pbar_description.value = "Collecting test results..."
+
         summary = TestSuiteSummary(
             title=self.suite.title,
             description=self.suite.description,
             sections=self.suite.sections,
+            show_link=show_link,
         )
         summary.display()

@@ -181,6 +187,6 @@ class TestSuiteRunner:
         run_async(self.log_results)
         run_async_check(self._check_progress)

-        self.summarize()
+        self.summarize(show_link=send)

         self._stop_progress_bar()
validmind/vm_models/test_suite/summary.py
CHANGED
@@ -35,8 +35,14 @@ class TestSuiteSectionSummary:
         self._build_summary()

     def _add_description(self):
-
-
+        if not self.description:
+            return
+
+        self._widgets.append(
+            widgets.HTML(
+                value=f'<div class="result">{md_to_html(self.description)}</div>'
+            )
+        )

     def _add_tests_summary(self):
         children = []
@@ -45,9 +51,9 @@ class TestSuiteSectionSummary:
         for test in self.tests:
             children.append(test.result.to_widget())
             titles.append(
-                f"❌ {test.result.name}: {test.
+                f"❌ {test.result.name}: {test.name} ({test.test_id})"
                 if isinstance(test.result, FailedResultWrapper)
-                else f"{test.result.name}: {test.
+                else f"{test.result.name}: {test.name} ({test.test_id})"
             )

         self._widgets.append(widgets.Accordion(children=children, titles=titles))
@@ -71,6 +77,7 @@ class TestSuiteSummary:
     title: str
     description: str
     sections: List[TestSuiteSection]
+    show_link: bool = True

     _widgets: List[widgets.Widget] = None

@@ -100,8 +107,11 @@ class TestSuiteSummary:
         self._widgets.append(widgets.HTML(value=results_link))

     def _add_description(self):
-
-
+        self._widgets.append(
+            widgets.HTML(
+                value=f'<div class="result">{md_to_html(self.description)}</div>'
+            )
+        )

     def _add_sections_summary(self):
         children = []
@@ -145,7 +155,8 @@ class TestSuiteSummary:
         self._widgets = []

         self._add_title()
-        self._add_results_link()
+        if self.show_link:
+            self._add_results_link()
         self._add_description()
         if len(self.sections) == 1:
             self._add_top_level_section_summary()
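Between the runner and summary changes above, the platform results link can now be suppressed: the test-suite runner calls `summarize(show_link=send)`, and `TestSuiteSummary` only renders the link when `show_link` is true. A sketch of the internal construction, mirroring the diff (this is not a public API; `suite` is assumed to be an already-loaded test suite):

from validmind.vm_models.test_suite.summary import TestSuiteSummary

summary = TestSuiteSummary(
    title=suite.title,
    description=suite.description,
    sections=suite.sections,
    show_link=False,  # new field: skip the "view results" link, e.g. when nothing was sent to the platform
)
summary.display()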
validmind/vm_models/test_suite/test.py
CHANGED
@@ -21,6 +21,7 @@ class TestSuiteTest:

     test_id: str
     output_template: str = None
+    name: str = None

     _test_class: Test = None
     _test_instance: Test = None
@@ -39,6 +40,8 @@ class TestSuiteTest:
             self.test_id = test_id_or_obj["id"]
             self.output_template = test_id_or_obj.get("output_template")

+        self.name = test_id_to_name(self.test_id)
+
         try:
             self._test_class = load_test_class(self.test_id)
         except LoadTestError as e:
@@ -52,14 +55,6 @@ class TestSuiteTest:
             # since _test_class is None
             logger.error(f"Failed to load test '{self.test_id}': {e}")

-    @property
-    def title(self):
-        return test_id_to_name(self.test_id)
-
-    @property
-    def name(self):
-        return self._test_class.name
-
     @property
     def test_type(self):
         return self._test_class.test_type
@@ -86,12 +81,12 @@ class TestSuiteTest:
             )
         except Exception as e:
             logger.error(
-                f"Failed to load test '{self.
+                f"Failed to load test '{self.test_id}': "
                 f"({e.__class__.__name__}) {e}"
             )
             self.result = FailedResultWrapper(
                 error=e,
-                message=f"Failed to load test '{self.
+                message=f"Failed to load test '{self.name}'",
                 result_id=self.test_id,
             )

@@ -107,7 +102,7 @@ class TestSuiteTest:
            # run the test and log the performance if LOG_LEVEL is set to DEBUG
            log_performance(
                func=self._test_instance.run,
-                name=self.
+                name=self.test_id,
                logger=logger,
            )()  # this is a decorator so we need to call it
@@ -116,14 +111,13 @@ class TestSuiteTest:
                raise e  # Re-raise the exception if we are in fail fast mode

            logger.error(
-                f"Failed to run test '{self.
-                f"({e.__class__.__name__}) {e}"
+                f"Failed to run test '{self.test_id}': " f"({e.__class__.__name__}) {e}"
            )
            self.result = FailedResultWrapper(
                name=f"Failed {self._test_instance.test_type}",
                error=e,
-                message=f"Failed to run '{self.
-                result_id=self.
+                message=f"Failed to run '{self.name}'",
+                result_id=self.test_id,
            )

            return
@@ -132,8 +126,8 @@ class TestSuiteTest:
            self.result = FailedResultWrapper(
                name=f"Failed {self._test_instance.test_type}",
                error=None,
-                message=f"'{self.
-                result_id=self.
+                message=f"'{self.name}' did not return a result",
+                result_id=self.test_id,
            )

            return
@@ -142,9 +136,8 @@ class TestSuiteTest:
            self.result = FailedResultWrapper(
                name=f"Failed {self._test_instance.test_type}",
                error=None,
-                message=f"
-
-                result_id=self._test_instance.name,
+                message=f"{self.name} returned an invalid result: {self._test_instance.result}",
+                result_id=self.test_id,
            )

            return
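`TestSuiteTest` now resolves its display name once, via `test_id_to_name()`, instead of exposing `title`/`name` properties, and its log messages and failed-result IDs consistently use the raw `test_id`. A quick sketch of the helper (the import path follows the `from ...utils import test_id_to_name` usage elsewhere in this diff; the rendered name is illustrative):

from validmind.utils import test_id_to_name

print(test_id_to_name("validmind.data_validation.HighPearsonCorrelation"))
# expected: a human-readable name derived from the last segment of the test ID,
# e.g. "High Pearson Correlation" (illustrative)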