validmind 2.8.12__py3-none-any.whl → 2.8.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- validmind/__init__.py +6 -5
- validmind/__version__.py +1 -1
- validmind/ai/test_descriptions.py +13 -9
- validmind/ai/utils.py +2 -2
- validmind/api_client.py +75 -32
- validmind/client.py +108 -100
- validmind/client_config.py +3 -3
- validmind/datasets/classification/__init__.py +7 -3
- validmind/datasets/credit_risk/lending_club.py +28 -16
- validmind/datasets/nlp/cnn_dailymail.py +10 -4
- validmind/datasets/regression/__init__.py +22 -5
- validmind/errors.py +17 -7
- validmind/input_registry.py +1 -1
- validmind/logging.py +44 -35
- validmind/models/foundation.py +2 -2
- validmind/models/function.py +10 -3
- validmind/template.py +30 -22
- validmind/test_suites/__init__.py +2 -2
- validmind/tests/_store.py +13 -4
- validmind/tests/comparison.py +65 -33
- validmind/tests/data_validation/ClassImbalance.py +3 -1
- validmind/tests/data_validation/DatasetDescription.py +2 -23
- validmind/tests/data_validation/DescriptiveStatistics.py +1 -1
- validmind/tests/data_validation/Skewness.py +7 -6
- validmind/tests/decorator.py +14 -11
- validmind/tests/load.py +38 -24
- validmind/tests/model_validation/ragas/AnswerCorrectness.py +4 -2
- validmind/tests/model_validation/ragas/ContextEntityRecall.py +4 -2
- validmind/tests/model_validation/ragas/ContextPrecision.py +4 -2
- validmind/tests/model_validation/ragas/ContextPrecisionWithoutReference.py +4 -2
- validmind/tests/model_validation/ragas/ContextRecall.py +4 -2
- validmind/tests/model_validation/ragas/Faithfulness.py +4 -2
- validmind/tests/model_validation/ragas/ResponseRelevancy.py +4 -2
- validmind/tests/model_validation/ragas/SemanticSimilarity.py +4 -2
- validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py +13 -3
- validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +3 -1
- validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +28 -25
- validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py +15 -10
- validmind/tests/output.py +66 -11
- validmind/tests/run.py +28 -14
- validmind/tests/test_providers.py +28 -35
- validmind/tests/utils.py +17 -4
- validmind/unit_metrics/__init__.py +1 -1
- validmind/utils.py +295 -31
- validmind/vm_models/dataset/dataset.py +19 -16
- validmind/vm_models/dataset/utils.py +5 -3
- validmind/vm_models/figure.py +6 -6
- validmind/vm_models/input.py +6 -5
- validmind/vm_models/model.py +5 -5
- validmind/vm_models/result/result.py +122 -43
- validmind/vm_models/result/utils.py +5 -5
- validmind/vm_models/test_suite/__init__.py +5 -0
- validmind/vm_models/test_suite/runner.py +5 -5
- validmind/vm_models/test_suite/summary.py +20 -2
- validmind/vm_models/test_suite/test.py +6 -6
- validmind/vm_models/test_suite/test_suite.py +10 -10
- {validmind-2.8.12.dist-info → validmind-2.8.20.dist-info}/METADATA +3 -4
- {validmind-2.8.12.dist-info → validmind-2.8.20.dist-info}/RECORD +61 -60
- {validmind-2.8.12.dist-info → validmind-2.8.20.dist-info}/WHEEL +1 -1
- {validmind-2.8.12.dist-info → validmind-2.8.20.dist-info}/LICENSE +0 -0
- {validmind-2.8.12.dist-info → validmind-2.8.20.dist-info}/entry_points.txt +0 -0
validmind/vm_models/result/result.py:

@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial

 """
-Result
+Result objects for test results
 """
 import asyncio
 import json
@@ -19,6 +19,7 @@ from ipywidgets import HTML, VBox

 from ... import api_client
 from ...ai.utils import DescriptionFuture
+from ...errors import InvalidParameterError
 from ...logging import get_logger
 from ...utils import (
     HumanReadableEncoder,
@@ -43,15 +44,15 @@ logger = get_logger(__name__)


 class RawData:
-    """Holds raw data for a test result"""
+    """Holds raw data for a test result."""

-    def __init__(self, log: bool = False, **kwargs):
-        """Create a new RawData object
+    def __init__(self, log: bool = False, **kwargs: Any) -> None:
+        """Create a new RawData object.

         Args:
-            log (bool): If True, log the raw data to ValidMind
-            **kwargs: Keyword arguments to set as attributes
-                `RawData(log=True, dataset_duplicates=df_duplicates)
+            log (bool): If True, log the raw data to ValidMind.
+            **kwargs: Keyword arguments to set as attributes, such as
+                `RawData(log=True, dataset_duplicates=df_duplicates)`.
         """
         self.log = log

@@ -61,8 +62,16 @@ class RawData:
     def __repr__(self) -> str:
         return f"RawData({', '.join(self.__dict__.keys())})"

-    def inspect(self, show: bool = True):
-        """Inspect the raw data
+    def inspect(self, show: bool = True) -> Optional[Dict[str, Any]]:
+        """Inspect the raw data.
+
+        Args:
+            show (bool): If True, print the raw data. If False, return it.
+
+        Returns:
+            Optional[Dict[str, Any]]: If True, print the raw data and return None. If
+                False, return the raw data dictionary.
+        """
         raw_data = {
             key: getattr(self, key)
             for key in self.__dict__
@@ -73,15 +82,21 @@ class RawData:
             return raw_data

         print(json.dumps(raw_data, indent=2, cls=HumanReadableEncoder))
+        return None

-    def serialize(self):
+    def serialize(self) -> Dict[str, Any]:
+        """Serialize the raw data to a dictionary
+
+        Returns:
+            Dict[str, Any]: The serialized raw data
+        """
         return {key: getattr(self, key) for key in self.__dict__}


 @dataclass
 class ResultTable:
     """
-    A dataclass that holds the table summary of result
+    A dataclass that holds the table summary of result.
     """

     data: Union[List[Any], pd.DataFrame]
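The `RawData` helper above is small enough to show end to end. A minimal sketch of how a test might attach and then inspect raw data, based only on the constructor and methods shown in this diff; the duplicates DataFrame is a made-up example value:

```python
import pandas as pd

from validmind.vm_models.result.result import RawData

# hypothetical intermediate output a test might want to keep alongside its result
df_duplicates = pd.DataFrame({"row_id": [3, 7], "n_occurrences": [2, 2]})

raw = RawData(log=True, dataset_duplicates=df_duplicates)

print(repr(raw))                   # RawData(log, dataset_duplicates)
as_dict = raw.inspect(show=False)  # returns the raw data instead of printing it
payload = raw.serialize()          # plain dict of the stored attributes
```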
@@ -110,33 +125,33 @@ class ResultTable:

 @dataclass
 class Result:
-    """Base Class for test suite results"""
+    """Base Class for test suite results."""

     result_id: str = None
     name: str = None

     def __str__(self) -> str:
-        """May be overridden by subclasses"""
+        """May be overridden by subclasses."""
         return self.__class__.__name__

     @abstractmethod
     def to_widget(self):
-        """Create an
+        """Create an ipywidget representation of the result... Must be overridden by subclasses."""
         raise NotImplementedError

     @abstractmethod
     def log(self):
-        """Log the result... Must be overridden by subclasses"""
+        """Log the result... Must be overridden by subclasses."""
         raise NotImplementedError

     def show(self):
-        """Display the result... May be overridden by subclasses"""
+        """Display the result... May be overridden by subclasses."""
         display(self.to_widget())


 @dataclass
 class ErrorResult(Result):
-    """Result for test suites that fail to load or run properly"""
+    """Result for test suites that fail to load or run properly."""

     name: str = "Failed Test"
     error: Exception = None
@@ -154,7 +169,7 @@ class ErrorResult(Result):

 @dataclass
 class TestResult(Result):
-    """Test result"""
+    """Test result."""

     name: str = "Test Result"
     ref_id: str = None
@@ -232,12 +247,12 @@ class TestResult(Result):
         table: Union[ResultTable, pd.DataFrame, List[Dict[str, Any]]],
         title: Optional[str] = None,
     ):
-        """Add a new table to the result
+        """Add a new table to the result.

         Args:
-            table (Union[ResultTable, pd.DataFrame, List[Dict[str, Any]]]): The table to add
+            table (Union[ResultTable, pd.DataFrame, List[Dict[str, Any]]]): The table to add.
             title (Optional[str]): The title of the table (can optionally be provided for
-                pd.DataFrame and List[Dict[str, Any]] tables)
+                pd.DataFrame and List[Dict[str, Any]] tables).
         """
         if self.tables is None:
             self.tables = []
@@ -248,10 +263,10 @@ class TestResult(Result):
         self.tables.append(table)

     def remove_table(self, index: int):
-        """Remove a table from the result by index
+        """Remove a table from the result by index.

         Args:
-            index (int): The index of the table to remove (default is 0)
+            index (int): The index of the table to remove (default is 0).
         """
         if self.tables is None:
             return
@@ -267,14 +282,19 @@ class TestResult(Result):
             bytes,
             Figure,
         ],
-    ):
-        """Add a new figure to the result
+    ) -> None:
+        """Add a new figure to the result.

         Args:
-            figure
-
-
-
+            figure: The figure to add. Can be one of:
+                - matplotlib.figure.Figure: A matplotlib figure
+                - plotly.graph_objs.Figure: A plotly figure
+                - plotly.graph_objs.FigureWidget: A plotly figure widget
+                - bytes: A PNG image as raw bytes
+                - validmind.vm_models.figure.Figure: A ValidMind figure object.
+
+        Returns:
+            None.
         """
         if self.figures is None:
             self.figures = []
@@ -293,10 +313,10 @@ class TestResult(Result):
         self.figures.append(figure)

     def remove_figure(self, index: int = 0):
-        """Remove a figure from the result by index
+        """Remove a figure from the result by index.

         Args:
-            index (int): The index of the figure to remove (default is 0)
+            index (int): The index of the figure to remove (default is 0).
         """
         if self.figures is None:
             return
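Since `add_table` and `add_figure` accept plain pandas DataFrames and matplotlib figures, post-processing a result before logging stays short. A sketch based on the signatures above; the `TestResult` instance is assumed to come from a normal test run:

```python
import matplotlib.pyplot as plt
import pandas as pd

from validmind.vm_models.result.result import TestResult


def attach_extras(result: TestResult) -> None:
    """Attach an extra table and figure to an existing test result."""
    summary_df = pd.DataFrame({"metric": ["accuracy"], "value": [0.91]})
    result.add_table(summary_df, title="Summary Metrics")

    fig, ax = plt.subplots()
    ax.hist([0.2, 0.4, 0.6, 0.8])
    result.add_figure(fig)  # matplotlib figures are accepted directly

    # tables and figures can also be dropped again by index
    result.remove_figure(index=0)
```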
@@ -332,7 +352,7 @@ class TestResult(Result):

     @classmethod
     def _get_client_config(cls):
-        """Get the client config, loading it if not cached"""
+        """Get the client config, loading it if not cached."""
         if cls._client_config_cache is None:
             api_client.reload()
             cls._client_config_cache = api_client.client_config
@@ -350,7 +370,7 @@ class TestResult(Result):
         return cls._client_config_cache

     def check_result_id_exist(self):
-        """Check if the result_id exists in any test block across all sections"""
+        """Check if the result_id exists in any test block across all sections."""
         client_config = self._get_client_config()

         # Iterate through all sections
@@ -371,7 +391,7 @@ class TestResult(Result):
     def _validate_section_id_for_block(
         self, section_id: str, position: Union[int, None] = None
     ):
-        """Validate the section_id exits on the template before logging"""
+        """Validate the section_id exits on the template before logging."""
         client_config = self._get_client_config()
         found = False

@@ -410,7 +430,7 @@ class TestResult(Result):
         )

     def serialize(self):
-        """Serialize the result for the API"""
+        """Serialize the result for the API."""
         return {
             "test_name": self.result_id,
             "title": self.title,
@@ -423,10 +443,16 @@
         }

     async def log_async(
-        self,
+        self,
+        section_id: str = None,
+        position: int = None,
+        config: Dict[str, bool] = None,
     ):
         tasks = []  # collect tasks to run in parallel (async)

+        # Default empty dict if None
+        config = config or {}
+
         if self.metric is not None:
             # metrics are logged as separate entities
             tasks.append(
@@ -438,12 +464,13 @@
                 )
             )

-        if self.tables or self.figures:
+        if self.tables or self.figures or self.description:
             tasks.append(
                 api_client.alog_test_result(
                     result=self.serialize(),
                     section_id=section_id,
                     position=position,
+                    config=config,
                 )
             )

@@ -467,17 +494,32 @@

         return await asyncio.gather(*tasks)

-    def log(
-
+    def log(
+        self,
+        section_id: str = None,
+        position: int = None,
+        unsafe: bool = False,
+        config: Dict[str, bool] = None,
+    ):
+        """Log the result to ValidMind.

         Args:
             section_id (str): The section ID within the model document to insert the
-                test result
+                test result.
             position (int): The position (index) within the section to insert the test
-                result
+                result.
             unsafe (bool): If True, log the result even if it contains sensitive data
-                i.e. raw data from input datasets
+                i.e. raw data from input datasets.
+            config (Dict[str, bool]): Configuration options for displaying the test result.
+                Available config options:
+                - hideTitle: Hide the title in the document view
+                - hideText: Hide the description text in the document view
+                - hideParams: Hide the parameters in the document view
+                - hideTables: Hide tables in the document view
+                - hideFigures: Hide figures in the document view
         """
+        if config:
+            self.validate_log_config(config)

         self.check_result_id_exist()

@@ -488,4 +530,41 @@
         if section_id:
             self._validate_section_id_for_block(section_id, position)

-        run_async(
+        run_async(
+            self.log_async,
+            section_id=section_id,
+            position=position,
+            config=config,
+        )
+
+    def validate_log_config(self, config: Dict[str, bool]):
+        """Validate the configuration options for logging a test result
+
+        Args:
+            config (Dict[str, bool]): Configuration options to validate
+
+        Raises:
+            InvalidParameterError: If config contains invalid keys or non-boolean values
+        """
+        valid_keys = {
+            "hideTitle",
+            "hideText",
+            "hideParams",
+            "hideTables",
+            "hideFigures",
+        }
+        invalid_keys = set(config.keys()) - valid_keys
+        if invalid_keys:
+            raise InvalidParameterError(
+                f"Invalid config keys: {', '.join(invalid_keys)}. "
+                f"Valid keys are: {', '.join(valid_keys)}"
+            )
+
+        # Ensure all values are boolean
+        non_bool_keys = [
+            key for key, value in config.items() if not isinstance(value, bool)
+        ]
+        if non_bool_keys:
+            raise InvalidParameterError(
+                f"Values for config keys must be boolean. Non-boolean values found for keys: {', '.join(non_bool_keys)}"
+            )
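The new `config` argument to `log()` controls how the logged result is rendered in the model document, and `validate_log_config` rejects anything outside the five keys listed above. A short sketch of the call; the section ID is a placeholder for whatever section exists in your documentation template:

```python
from validmind.vm_models.result.result import TestResult


def log_without_params(result: TestResult) -> None:
    """Log a test result while hiding its title and parameters in the document."""
    result.log(
        section_id="model_evaluation",  # placeholder section ID from your template
        config={"hideTitle": True, "hideParams": True},
    )

    # Anything else raises InvalidParameterError, e.g.:
    #   result.log(config={"hideEverything": True})   -> invalid key
    #   result.log(config={"hideTables": "yes"})      -> non-boolean value
```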
validmind/vm_models/result/utils.py:

@@ -28,7 +28,7 @@ _result_template = None


 def get_result_template():
-    """Get the
+    """Get the Jinja2 HTML template for rendering test results."""
     global _result_template

     if _result_template is None:
@@ -39,7 +39,7 @@ def get_result_template():


 async def update_metadata(content_id: str, text: str, _json: Union[Dict, List] = None):
-    """Create or
+    """Create or update a metadata object."""
     parts = content_id.split("::")
     content_id = parts[0]
     revision_name = parts[1] if len(parts) > 1 else None
@@ -53,7 +53,7 @@ async def update_metadata(content_id: str, text: str, _json: Union[Dict, List] = None):


 def check_for_sensitive_data(data: pd.DataFrame, inputs: List[VMInput]):
-    """Check if
+    """Check if the data contains sensitive information from input datasets."""
     dataset_columns = {
         col: len(input_obj.df)
         for input_obj in inputs
@@ -77,7 +77,7 @@ def check_for_sensitive_data(data: pd.DataFrame, inputs: List[VMInput]):


 def tables_to_widgets(tables: List["ResultTable"]):
-    """Convert
+    """Convert a list of tables to ipywidgets."""
     widgets = [
         HTML("<h3>Tables</h3>"),
     ]
@@ -128,7 +128,7 @@ def tables_to_widgets(tables: List["ResultTable"]):


 def figures_to_widgets(figures: List[Figure]) -> list:
-    """
+    """Convert a list of figures to ipywidgets."""
     num_columns = 2 if len(figures) > 1 else 1

     plot_widgets = GridBox(
validmind/vm_models/test_suite/runner.py:

@@ -17,7 +17,7 @@ logger = get_logger(__name__)

 class TestSuiteRunner:
     """
-    Runs a test suite
+    Runs a test suite.
     """

     suite: TestSuite = None
@@ -36,7 +36,7 @@ class TestSuiteRunner:
         self._load_config(inputs)

     def _load_config(self, inputs: dict = None):
-        """Splits the config into a global config and test configs"""
+        """Splits the config into a global config and test configs."""
         self._test_configs = {
             test.test_id: {"inputs": inputs or {}} for test in self.suite.get_tests()
         }
@@ -59,7 +59,7 @@

     def _start_progress_bar(self, send: bool = True):
         """
-        Initializes the progress bar elements
+        Initializes the progress bar elements.
         """
         # TODO: make this work for when user runs only a section of the test suite
         # if we are sending then there is a task for each test and logging its result
@@ -76,7 +76,7 @@
         self.pbar.close()

     async def log_results(self):
-        """Logs the results of the test suite to ValidMind
+        """Logs the results of the test suite to ValidMind.

         This method will be called after the test suite has been run and all results have been
         collected. This method will log the results to ValidMind.
@@ -127,7 +127,7 @@
         summary.display()

     def run(self, send: bool = True, fail_fast: bool = False):
-        """Runs the test suite, renders the summary and sends the results to ValidMind
+        """Runs the test suite, renders the summary and sends the results to ValidMind.

         Args:
             send (bool, optional): Whether to send the results to ValidMind.
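TestSuiteRunner is normally driven through the high-level client rather than constructed directly. A hedged sketch of that path, assuming the standard `vm.init` / `vm.run_documentation_tests` entry points; credentials and inputs are placeholders:

```python
import validmind as vm

# placeholder credentials; use the values from your model's API settings
vm.init(api_key="...", api_secret="...", model="...")

# vm_dataset / vm_model are assumed to have been created earlier with
# vm.init_dataset(...) and vm.init_model(...)
vm.run_documentation_tests(inputs={"dataset": vm_dataset, "model": vm_model})
```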
validmind/vm_models/test_suite/summary.py:

@@ -16,6 +16,7 @@ logger = get_logger(__name__)


 def id_to_name(id: str) -> str:
+    """Convert an ID to a human-readable name."""
     # replace underscores, hyphens etc with spaces
     name = id.replace("_", " ").replace("-", " ").replace(".", " ")
     # capitalize each word
@@ -26,6 +27,8 @@ def id_to_name(id: str) -> str:

 @dataclass
 class TestSuiteSectionSummary:
+    """Represents a summary of a test suite section."""
+
     tests: List[TestSuiteTest]
     description: Optional[str] = None

@@ -35,6 +38,7 @@ class TestSuiteSectionSummary:
         self._build_summary()

     def _add_description(self):
+        """Add the section description to the summary."""
         if not self.description:
             return

@@ -45,6 +49,7 @@
         )

     def _add_tests_summary(self):
+        """Add the test results summary."""
         children = []
         titles = []

@@ -59,6 +64,7 @@
         self._widgets.append(widgets.Accordion(children=children, titles=titles))

     def _build_summary(self):
+        """Build the complete summary."""
         self._widgets = []

         if self.description:
@@ -69,11 +75,14 @@ class TestSuiteSectionSummary:
         self.summary = widgets.VBox(self._widgets)

     def display(self):
+        """Display the summary."""
         display(self.summary)


 @dataclass
 class TestSuiteSummary:
+    """Represents a summary of a complete test suite."""
+
     title: str
     description: str
     sections: List[TestSuiteSection]
@@ -82,9 +91,11 @@ class TestSuiteSummary:
     _widgets: List[widgets.Widget] = None

     def __post_init__(self):
+        """Initialize the summary after the dataclass is created."""
         self._build_summary()

     def _add_title(self):
+        """Add the title to the summary."""
         title = f"""
         <h2>Test Suite Results: <i style="color: #DE257E">{self.title}</i></h2><hr>
         """.strip()
@@ -92,6 +103,7 @@ class TestSuiteSummary:
         self._widgets.append(widgets.HTML(value=title))

     def _add_results_link(self):
+        """Add a link to documentation on ValidMind."""
         # avoid circular import
         from ...api_client import get_api_host, get_api_model

@@ -99,14 +111,15 @@
         link = f"{ui_host}model-inventory/{get_api_model()}"
         results_link = f"""
         <h3>
-            Check out the updated documentation
-            <a href="{link}" target="_blank">ValidMind
+            Check out the updated documentation on
+            <a href="{link}" target="_blank">ValidMind</a>.
         </h3>
         """.strip()

         self._widgets.append(widgets.HTML(value=results_link))

     def _add_description(self):
+        """Add the test suite description to the summary."""
         self._widgets.append(
             widgets.HTML(
                 value=f'<div class="result">{md_to_html(self.description)}</div>'
@@ -114,6 +127,7 @@ class TestSuiteSummary:
             )

     def _add_sections_summary(self):
+        """Append the section summary."""
         children = []
         titles = []

@@ -132,11 +146,13 @@
         self._widgets.append(widgets.Accordion(children=children, titles=titles))

     def _add_top_level_section_summary(self):
+        """Add the top-level section summary."""
         self._widgets.append(
             TestSuiteSectionSummary(tests=self.sections[0].tests).summary
         )

     def _add_footer(self):
+        """Add the footer."""
         footer = """
         <style>
             .result {
@@ -152,6 +168,7 @@ class TestSuiteSummary:
         self._widgets.append(widgets.HTML(value=footer))

     def _build_summary(self):
+        """Build the complete summary."""
         self._widgets = []

         self._add_title()
@@ -166,4 +183,5 @@
         self.summary = widgets.VBox(self._widgets)

     def display(self):
+        """Display the summary."""
         display(self.summary)
validmind/vm_models/test_suite/test.py:

@@ -16,7 +16,7 @@ logger = get_logger(__name__)

 class TestSuiteTest:
     """
-    Wraps a 'Test' in a Test Suite and handles logic and state for that test
+    Wraps a 'Test' in a Test Suite and handles logic and state for that test.
     """

     test_id: str
@@ -28,10 +28,10 @@
     _load_failed: bool = False

     def __init__(self, test_id_or_obj):
-        """Load the test class from the test
+        """Load the test class from the test ID.

         Args:
-            test_id_or_obj (str): The test
+            test_id_or_obj (str): The test ID or a dict with test ID and other options.
         """
         if isinstance(test_id_or_obj, str):
             self.test_id = test_id_or_obj
@@ -42,7 +42,7 @@
         self.name = test_id_to_name(self.test_id)

     def get_default_config(self):
-        """Returns the default configuration for the test"""
+        """Returns the default configuration for the test."""
         try:
             test_func = load_test(self.test_id)
         except LoadTestError as e:
@@ -66,7 +66,7 @@
         return config

     def run(self, fail_fast: bool = False, config: dict = None):
-        """Run the test"""
+        """Run the test."""
         if self._load_failed:
             return

@@ -110,7 +110,7 @@
         )

     async def log_async(self):
-        """Log the result for this test to ValidMind"""
+        """Log the result for this test to ValidMind."""
         if not self.result:
             raise ValueError("Cannot log test result before running the test")

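For completeness, a small sketch of the `TestSuiteTest` wrapper documented above, using a test ID that ships in this package:

```python
from validmind.vm_models.test_suite.test import TestSuiteTest

test = TestSuiteTest("validmind.data_validation.ClassImbalance")

print(test.name)                  # human-readable name derived from the test ID
print(test.get_default_config())  # default inputs/params declared by the test
```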
|