validmind 2.1.1__py3-none-any.whl → 2.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- validmind/__version__.py +1 -1
- validmind/ai.py +72 -49
- validmind/api_client.py +42 -16
- validmind/client.py +68 -25
- validmind/datasets/llm/rag/__init__.py +11 -0
- validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_1.csv +30 -0
- validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_2.csv +30 -0
- validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_3.csv +53 -0
- validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_4.csv +53 -0
- validmind/datasets/llm/rag/datasets/rfp_existing_questions_client_5.csv +53 -0
- validmind/datasets/llm/rag/rfp.py +41 -0
- validmind/errors.py +1 -1
- validmind/html_templates/__init__.py +0 -0
- validmind/html_templates/content_blocks.py +89 -14
- validmind/models/__init__.py +7 -4
- validmind/models/foundation.py +8 -34
- validmind/models/function.py +51 -0
- validmind/models/huggingface.py +16 -46
- validmind/models/metadata.py +42 -0
- validmind/models/pipeline.py +66 -0
- validmind/models/pytorch.py +8 -42
- validmind/models/r_model.py +33 -82
- validmind/models/sklearn.py +39 -38
- validmind/template.py +8 -26
- validmind/tests/__init__.py +43 -20
- validmind/tests/data_validation/ANOVAOneWayTable.py +1 -1
- validmind/tests/data_validation/ChiSquaredFeaturesTable.py +1 -1
- validmind/tests/data_validation/DescriptiveStatistics.py +2 -4
- validmind/tests/data_validation/Duplicates.py +1 -1
- validmind/tests/data_validation/IsolationForestOutliers.py +2 -2
- validmind/tests/data_validation/LaggedCorrelationHeatmap.py +1 -1
- validmind/tests/data_validation/TargetRateBarPlots.py +1 -1
- validmind/tests/data_validation/nlp/LanguageDetection.py +59 -0
- validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py +48 -0
- validmind/tests/data_validation/nlp/Punctuations.py +11 -12
- validmind/tests/data_validation/nlp/Sentiment.py +57 -0
- validmind/tests/data_validation/nlp/Toxicity.py +45 -0
- validmind/tests/decorator.py +12 -7
- validmind/tests/model_validation/BertScore.py +100 -98
- validmind/tests/model_validation/BleuScore.py +93 -64
- validmind/tests/model_validation/ContextualRecall.py +74 -91
- validmind/tests/model_validation/MeteorScore.py +86 -74
- validmind/tests/model_validation/RegardScore.py +103 -121
- validmind/tests/model_validation/RougeScore.py +118 -0
- validmind/tests/model_validation/TokenDisparity.py +84 -121
- validmind/tests/model_validation/ToxicityScore.py +109 -123
- validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py +96 -0
- validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py +71 -0
- validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py +92 -0
- validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py +69 -0
- validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py +78 -0
- validmind/tests/model_validation/embeddings/StabilityAnalysis.py +35 -23
- validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py +3 -0
- validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py +7 -1
- validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py +3 -0
- validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py +3 -0
- validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py +99 -0
- validmind/tests/model_validation/ragas/AnswerCorrectness.py +131 -0
- validmind/tests/model_validation/ragas/AnswerRelevance.py +134 -0
- validmind/tests/model_validation/ragas/AnswerSimilarity.py +119 -0
- validmind/tests/model_validation/ragas/AspectCritique.py +167 -0
- validmind/tests/model_validation/ragas/ContextEntityRecall.py +133 -0
- validmind/tests/model_validation/ragas/ContextPrecision.py +123 -0
- validmind/tests/model_validation/ragas/ContextRecall.py +123 -0
- validmind/tests/model_validation/ragas/ContextRelevancy.py +114 -0
- validmind/tests/model_validation/ragas/Faithfulness.py +119 -0
- validmind/tests/model_validation/ragas/utils.py +66 -0
- validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +3 -7
- validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py +8 -9
- validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py +5 -10
- validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py +3 -2
- validmind/tests/model_validation/sklearn/ROCCurve.py +2 -1
- validmind/tests/model_validation/sklearn/RegressionR2Square.py +1 -1
- validmind/tests/model_validation/sklearn/RobustnessDiagnosis.py +2 -3
- validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +7 -11
- validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py +3 -4
- validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionModelSummary.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionModelsCoeffs.py +1 -1
- validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py +1 -1
- validmind/tests/model_validation/statsmodels/ScorecardHistogram.py +5 -6
- validmind/unit_metrics/__init__.py +26 -49
- validmind/unit_metrics/composite.py +13 -7
- validmind/unit_metrics/regression/sklearn/AdjustedRSquaredScore.py +1 -1
- validmind/utils.py +99 -6
- validmind/vm_models/__init__.py +1 -1
- validmind/vm_models/dataset/__init__.py +7 -0
- validmind/vm_models/dataset/dataset.py +560 -0
- validmind/vm_models/dataset/utils.py +146 -0
- validmind/vm_models/model.py +97 -72
- validmind/vm_models/test/metric.py +9 -24
- validmind/vm_models/test/result_wrapper.py +124 -28
- validmind/vm_models/test/threshold_test.py +10 -28
- validmind/vm_models/test_context.py +1 -1
- validmind/vm_models/test_suite/summary.py +3 -4
- {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/METADATA +5 -3
- {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/RECORD +103 -78
- validmind/models/catboost.py +0 -33
- validmind/models/statsmodels.py +0 -50
- validmind/models/xgboost.py +0 -30
- validmind/tests/model_validation/BertScoreAggregate.py +0 -90
- validmind/tests/model_validation/RegardHistogram.py +0 -148
- validmind/tests/model_validation/RougeMetrics.py +0 -147
- validmind/tests/model_validation/RougeMetricsAggregate.py +0 -133
- validmind/tests/model_validation/SelfCheckNLIScore.py +0 -112
- validmind/tests/model_validation/ToxicityHistogram.py +0 -136
- validmind/vm_models/dataset.py +0 -1303
- {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/LICENSE +0 -0
- {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/WHEEL +0 -0
- {validmind-2.1.1.dist-info → validmind-2.2.4.dist-info}/entry_points.txt +0 -0

validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py
CHANGED
@@ -106,7 +106,7 @@ class RegressionModelInsampleComparison(Metric):
         evaluation_results = []
 
         for i, model in enumerate(models):
-            X_columns = dataset.
+            X_columns = dataset.feature_columns
             y_true = dataset.y
             y_pred = dataset.y_pred(model)
 

validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py
CHANGED
@@ -96,7 +96,7 @@ class RegressionModelOutsampleComparison(Metric):
 
         for fitted_model in model_list:
             # Extract the column names of the independent variables from the model
-            independent_vars = dataset.
+            independent_vars = dataset.feature_columns
 
             # Separate the target variable and features in the test dataset
             y_test = dataset.y

validmind/tests/model_validation/statsmodels/RegressionModelSummary.py
CHANGED
@@ -57,7 +57,7 @@ class RegressionModelSummary(Metric):
     }
 
     def run(self):
-        X_columns = self.inputs.dataset.
+        X_columns = self.inputs.dataset.feature_columns
 
         y_true = self.inputs.dataset.y
         y_pred = self.inputs.dataset.y_pred(self.inputs.model)

validmind/tests/model_validation/statsmodels/RegressionModelsCoeffs.py
CHANGED
@@ -73,7 +73,7 @@ class RegressionModelsCoeffs(Metric):
             raise ValueError("List of models must be provided in the models parameter")
 
         for model in self.inputs.models:
-            if model.
+            if model.class_ != "statsmodels" and model.class_ != "R":
                 raise SkipTestError(
                     "Only statsmodels and R models are supported for this metric"
                 )

validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py
CHANGED
@@ -80,7 +80,7 @@ class RegressionModelsPerformance(Metric):
         evaluation_results = []
 
         for model, dataset in zip(models, datasets):
-            X_columns = dataset.
+            X_columns = dataset.feature_columns
             y_true = dataset.y
             y_pred = dataset.y_pred(model)
 

validmind/tests/model_validation/statsmodels/ScorecardHistogram.py
CHANGED
@@ -112,16 +112,15 @@ class ScorecardHistogram(Metric):
         dataframes = []
         metric_value = {"score_histogram": {}}
         for dataset in self.inputs.datasets:
-
-            # Check if the score_column exists in the DataFrame
-            if score_column not in df.columns:
+            if score_column not in dataset.df.columns:
                 raise ValueError(
                     f"The required column '{score_column}' is not present in the dataset with input_id {dataset.input_id}"
                 )
 
-
-
-
+            dataframes.append(dataset.df.copy())
+            metric_value["score_histogram"][dataset.input_id] = list(
+                dataset.df[score_column]
+            )
 
         figures = self.plot_score_histogram(
             dataframes, dataset_titles, score_column, target_column, title

validmind/unit_metrics/__init__.py
CHANGED
@@ -6,8 +6,6 @@ import hashlib
 import json
 from importlib import import_module
 
-import numpy as np
-
 from ..tests.decorator import _build_result, _inspect_signature
 from ..utils import get_model_info, test_id_to_name
 

@@ -58,7 +56,7 @@ def _serialize_model(model):
     return hash_object.hexdigest()
 
 
-def _serialize_dataset(dataset,
+def _serialize_dataset(dataset, model):
     """
     Serialize the description of the dataset input to a unique hash.
 

@@ -68,11 +66,11 @@ def _serialize_dataset(dataset, model_id):
 
     Args:
         dataset: The dataset object, which should have properties like _df (pandas DataFrame),
-            target_column (string), feature_columns (list of strings), and
-
+            target_column (string), feature_columns (list of strings), and extra_columns (dict).
+        model (VMModel): The model whose predictions will be included in the serialized dataset
 
     Returns:
-        str:
+        str: MD5 hash of the dataset
 
     Note:
         Including the model ID and prediction column name in the hash calculation ensures uniqueness,

@@ -80,57 +78,33 @@ def _serialize_dataset(dataset, model_id):
        This approach guarantees that the hash will distinguish between model-generated predictions
        and pre-computed prediction columns, addressing potential hash collisions.
    """
-
-
-
-
-
-
-
-
-    columns = (
-        [dataset._target_column] + dataset._feature_columns + [prediction_column_name]
+    return _fast_hash(
+        dataset.df[
+            [
+                *dataset.feature_columns,
+                dataset.target_column,
+                dataset.prediction_column(model),
+            ]
+        ]
     )
 
-    # Use _fast_hash function and include model_and_prediction_info in the hash calculation
-    hash_digest = _fast_hash(
-        dataset._df[columns], model_and_prediction_info=model_and_prediction_info
-    )
-
-    return hash_digest
-
 
-def _fast_hash(df, sample_size=1000
+def _fast_hash(df, sample_size=1000):
     """
-    Generates a hash
-    and optionally model and prediction information.
+    Generates a fast hash by sampling, converting to string and md5 hashing.
 
     Args:
         df (pd.DataFrame): The DataFrame to hash.
        sample_size (int): The maximum number of rows to include in the sample.
-        model_and_prediction_info (bytes, optional): Additional information to include in the hash.
 
     Returns:
-        str:
+        str: MD5 hash of the DataFrame.
    """
-
-    rows_bytes = str(len(df)).encode()
+    df_sample = df.sample(n=min(sample_size, len(df)), random_state=42)
 
-
-
-
-    else:
-        df_sample = df
-
-    # Convert the sampled DataFrame to a byte array. np.asarray ensures compatibility with various DataFrame contents.
-    byte_array = np.asarray(df_sample).data.tobytes()
-
-    # Initialize the hash object and update it with the row count, data bytes, and additional info
-    hash_obj = hashlib.sha256(
-        rows_bytes + byte_array + (model_and_prediction_info or b"")
-    )
-
-    return hash_obj.hexdigest()
+    return hashlib.md5(
+        df_sample.to_string(header=True, index=True).encode()
+    ).hexdigest()
 
 
 def get_metric_cache_key(metric_id, params, inputs):

@@ -150,9 +124,8 @@ def get_metric_cache_key(metric_id, params, inputs):
 
     dataset = inputs["dataset"]
     model = inputs["model"]
-    model_id = model.input_id
 
-    cache_elements.append(_serialize_dataset(dataset,
+    cache_elements.append(_serialize_dataset(dataset, model))
 
     cache_elements.append(_serialize_model(model))
 

@@ -197,7 +170,11 @@ def run_metric(metric_id, inputs=None, params=None, show=True, value_only=False):
        **{k: v for k, v in inputs.items() if k in _inputs.keys()},
        **{k: v for k, v in params.items() if k in _params.keys()},
    )
-    unit_metric_results_cache[cache_key] = (
+    unit_metric_results_cache[cache_key] = (
+        result,
+        # store the input ids that were used to calculate the result
+        [v.input_id for v in inputs.values()],
+    )
 
    value = unit_metric_results_cache[cache_key][0]
 

@@ -235,7 +212,7 @@ def run_metric(metric_id, inputs=None, params=None, show=True, value_only=False):
    )
 
    # in case the user tries to log the result object
-    def log(
+    def log():
        raise Exception(
            "Cannot log unit metrics directly..."
            "You can run this unit metric as part of a composite metric and log that"
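
For context, the new `_fast_hash` above swaps the previous numpy/SHA-256 scheme for a deterministic row sample followed by an MD5 of the stringified frame. A standalone sketch of that behavior, reproduced from the hunk so it can be run outside the package (the DataFrame and column names are illustrative only):

```python
# Standalone sketch of the sampling-based hashing used by the new _fast_hash above.
# Only pandas and hashlib are needed; the sample data is made up.
import hashlib

import pandas as pd


def fast_hash(df: pd.DataFrame, sample_size: int = 1000) -> str:
    # deterministic sample (random_state=42) so the same frame always hashes the same
    df_sample = df.sample(n=min(sample_size, len(df)), random_state=42)
    # stringify the sample and MD5 it, mirroring the new implementation
    return hashlib.md5(df_sample.to_string(header=True, index=True).encode()).hexdigest()


df = pd.DataFrame({"feature_a": range(10), "target": [i % 2 for i in range(10)]})
print(fast_hash(df))  # stable across calls for identical data
```

Sampling caps the cost on large datasets while the fixed random state keeps the cache key reproducible between runs.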

validmind/unit_metrics/composite.py
CHANGED
@@ -8,7 +8,7 @@ from uuid import uuid4
 
 from ..logging import get_logger
 from ..tests.decorator import _inspect_signature
-from ..utils import run_async, test_id_to_name
+from ..utils import get_description_metadata, run_async, test_id_to_name
 from ..vm_models.test.metric import Metric
 from ..vm_models.test.metric_result import MetricResult
 from ..vm_models.test.result_summary import ResultSummary, ResultTable

@@ -37,6 +37,7 @@ class CompositeMetric(Metric):
             metric_ids=self.unit_metrics,
             description=self.description(),
             inputs=self._get_input_dict(),
+            accessed_inputs=self.get_accessed_inputs(),
             params=self.params,
             output_template=self.output_template,
             show=False,

@@ -103,6 +104,7 @@ def run_metrics(
     description: str = None,
     output_template: str = None,
     inputs: dict = None,
+    accessed_inputs: List[str] = None,
     params: dict = None,
     test_id: str = None,
     show: bool = True,

@@ -128,6 +130,8 @@ def run_metrics(
         output_template (_type_, optional): Output template to customize the result
             table.
         inputs (_type_, optional): Inputs to pass to the unit metrics. Defaults to None
+        accessed_inputs (_type_, optional): Inputs that were accessed when running the
+            unit metrics - used for input tracking. Defaults to None.
         params (_type_, optional): Parameters to pass to the unit metrics. Defaults to
             None.
         test_id (str, optional): Test ID of the composite metric. Required if name is

@@ -196,13 +200,15 @@ def run_metrics(
     </style>
     """
 
+    result_summary = ResultSummary(results=[ResultTable(data=[results])])
     result_wrapper = MetricResultWrapper(
         result_id=test_id,
         result_metadata=[
-
-
-
-
+            get_description_metadata(
+                test_id=test_id,
+                default_description=description,
+                summary=result_summary.serialize(),
+            ),
             {
                 "content_id": f"composite_metric_def:{test_id}:unit_metrics",
                 "json": metric_ids,

@@ -212,13 +218,13 @@ def run_metrics(
                 "json": {"output_template": output_template},
             },
         ],
-        inputs=
+        inputs=accessed_inputs,
         output_template=output_template,
         metric=MetricResult(
             key=test_id,
             ref_id=str(uuid4()),
             value=results,
-            summary=
+            summary=result_summary,
         ),
     )
 

validmind/unit_metrics/regression/sklearn/AdjustedRSquaredScore.py
CHANGED
@@ -16,6 +16,6 @@ def AdjustedRSquaredScore(model, dataset):
     )
 
     row_count = len(dataset.y)
-    feature_count = len(dataset.
+    feature_count = len(dataset.feature_columns)
 
     return 1 - (1 - r2_score) * (row_count - 1) / (row_count - feature_count)
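
The only functional change here is the switch to the `feature_columns` accessor, but the final line shows the adjusted R² expression this unit metric computes. A small standalone sketch of that same calculation (scikit-learn assumed; the sample arrays are invented):

```python
# Standalone sketch of the adjusted R² expression shown in the hunk above.
from sklearn.metrics import r2_score


def adjusted_r2(y_true, y_pred, feature_count: int) -> float:
    r2 = r2_score(y_true, y_pred)
    row_count = len(y_true)
    # same expression as the metric: 1 - (1 - R^2) * (n - 1) / (n - p)
    return 1 - (1 - r2) * (row_count - 1) / (row_count - feature_count)


print(adjusted_r2([3.0, 2.5, 4.1, 5.0], [2.8, 2.7, 4.0, 4.9], feature_count=2))
```
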
validmind/utils.py
CHANGED
@@ -6,22 +6,29 @@ import asyncio
 import difflib
 import json
 import math
+import os
 import re
 import sys
 from platform import python_version
 from typing import Any
 
 import matplotlib.pylab as pylab
+import mistune
 import nest_asyncio
 import numpy as np
 import pandas as pd
 import seaborn as sns
 from IPython.core import getipython
-from IPython.display import HTML
+from IPython.display import HTML
+from IPython.display import display as ipy_display
+from latex2mathml.converter import convert
 from matplotlib.axes._axes import _log as matplotlib_axes_logger
 from numpy import ndarray
 from tabulate import tabulate
 
+from .ai import generate_description
+from .html_templates.content_blocks import math_jax_snippet, python_syntax_highlighting
+
 DEFAULT_BIG_NUMBER_DECIMALS = 2
 DEFAULT_SMALL_NUMBER_DECIMALS = 4
 
@@ -97,6 +104,8 @@ class NumpyEncoder(json.JSONEncoder):
             return bool(obj)
         if isinstance(obj, pd.Timestamp):
             return str(obj)
+        if isinstance(obj, set):
+            return list(obj)
         return super().default(obj)
 
     def encode(self, obj):
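
With this change the encoder serializes Python sets as JSON lists. A quick illustration, assuming validmind 2.2.4 is installed (the payload is made up):

```python
# Quick illustration of the new set handling in NumpyEncoder (validmind 2.2.4 assumed).
import json

from validmind.utils import NumpyEncoder

payload = {"feature_columns": {"age", "income", "balance"}}  # a set is not JSON-serializable by default
print(json.dumps(payload, cls=NumpyEncoder))  # the set is emitted as a JSON list
```
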
@@ -345,10 +354,10 @@ def test_id_to_name(test_id: str) -> str:
 
 def get_model_info(model):
     """Attempts to extract all model info from a model object instance"""
-    architecture = model.
-    framework = model.
-    framework_version = model.
-    language = model.
+    architecture = model.name
+    framework = model.library
+    framework_version = model.library_version
+    language = model.language
 
     if language is None:
         language = f"Python {python_version()}"
@@ -402,4 +411,88 @@ def preview_test_config(config):
     <div id="collapsibleContent" style="display:none;"><pre>{formatted_json}</pre></div>
     """
 
-
+    ipy_display(HTML(collapsible_html))
+
+
+def display(widget_or_html, syntax_highlighting=True, mathjax=True):
+    """Display widgets with extra goodies (syntax highlighting, MathJax, etc.)"""
+    if isinstance(widget_or_html, str):
+        ipy_display(HTML(widget_or_html))
+        # if html we can auto-detect if we actually need syntax highlighting or MathJax
+        syntax_highlighting = 'class="language-' in widget_or_html
+        mathjax = "$$" in widget_or_html
+    else:
+        ipy_display(widget_or_html)
+
+    if syntax_highlighting:
+        ipy_display(HTML(python_syntax_highlighting))
+
+    if mathjax:
+        ipy_display(HTML(math_jax_snippet))
+
+
+def md_to_html(md: str, mathml=False) -> str:
+    """Converts Markdown to HTML using mistune with plugins"""
+    # use mistune with math plugin to convert to html
+    html = mistune.create_markdown(
+        plugins=["math", "table", "strikethrough", "footnotes"]
+    )(md)
+
+    if not mathml:
+        # return the html as is (with latex that will be rendered by MathJax)
+        return html
+
+    # convert the latex to MathML which CKeditor can render
+    math_block_pattern = re.compile(r'<div class="math">\$\$([\s\S]*?)\$\$</div>')
+    html = math_block_pattern.sub(
+        lambda match: "<p>{}</p>".format(convert(match.group(1), display="block")), html
+    )
+
+    inline_math_pattern = re.compile(r'<span class="math">\\\((.*?)\\\)</span>')
+    html = inline_math_pattern.sub(
+        lambda match: "<span>{}</span>".format(
+            convert(match.group(1), display="inline")
+        ),
+        html,
+    )
+
+    return html
+
+
+def get_description_metadata(test_id, default_description, summary=None, figures=None):
+    """Get Metadata Dictionary for a Test or Metric Result
+
+    Generates an LLM interpretation of the test results or uses the default
+    description and returns a metadata object that can be logged with the test results.
+
+    To enable LLM-generated descriptions, set the VALIDMIND_LLM_DESCRIPTIONS_ENABLED
+    environment variable to "true". The default description will be used if LLM
+    descriptions are disabled.
+
+    Note: Either the summary or figures must be provided to generate the description.
+
+    Args:
+        test_id (str): The test ID
+        default_description (str): The default description for the test
+        summary (Any): The test summary or results to interpret
+        figures (List[Figure]): The figures to attach to the test suite result
+
+    Returns:
+        dict: The metadata object to be logged with the test results
+    """
+    if os.environ.get("VALIDMIND_LLM_DESCRIPTIONS_ENABLED", "false").lower() == "true":
+        revision_name = "Generated by ValidMind AI"
+        description = generate_description(
+            test_name=test_id,
+            test_description=default_description,
+            test_summary=summary,
+            figures=figures,
+        )
+    else:
+        revision_name = "Default Description"
+        description = default_description
+
+    return {
+        "content_id": f"metric_description:{test_id}::{revision_name}",
+        "text": description,
+    }
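
Two of these additions are user-facing: `md_to_html` converts Markdown (optionally turning LaTeX into MathML), and `get_description_metadata` only calls the LLM when an environment variable opts in. A minimal usage sketch, assuming validmind 2.2.4 is installed (the Markdown string is invented for illustration):

```python
# Minimal usage sketch for the new utils added above (validmind 2.2.4 assumed).
import os

from validmind.utils import md_to_html

# Markdown containing a LaTeX block; the text itself is invented
md = "Precision is defined as:\n\n$$\n\\frac{TP}{TP + FP}\n$$\n"

print(md_to_html(md))               # keeps the LaTeX for MathJax to render
print(md_to_html(md, mathml=True))  # converts the LaTeX block to MathML

# Opt in to LLM-generated result descriptions; otherwise the default description is used
os.environ["VALIDMIND_LLM_DESCRIPTIONS_ENABLED"] = "true"
```
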
validmind/vm_models/dataset/__init__.py
CHANGED
@@ -0,0 +1,7 @@
+# Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+# See the LICENSE file in the root of this repository for details.
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+from .dataset import DataFrameDataset, PolarsDataset, TorchDataset, VMDataset
+
+__all__ = ["VMDataset", "DataFrameDataset", "PolarsDataset", "TorchDataset"]
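
The monolithic `validmind/vm_models/dataset.py` was split into a `dataset/` package (see the file list above), and this new `__init__.py` re-exports the dataset wrappers. A one-line import sketch, assuming the package layout shown in the hunk:

```python
# The new dataset subpackage re-exports the wrappers defined in dataset/dataset.py
from validmind.vm_models.dataset import DataFrameDataset, PolarsDataset, TorchDataset, VMDataset
```
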