validmind 2.8.10__py3-none-any.whl → 2.8.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- validmind/__version__.py +1 -1
- validmind/ai/test_descriptions.py +4 -2
- validmind/tests/data_validation/ACFandPACFPlot.py +4 -1
- validmind/tests/data_validation/AutoMA.py +1 -1
- validmind/tests/data_validation/BivariateScatterPlots.py +5 -1
- validmind/tests/data_validation/BoxPierce.py +3 -1
- validmind/tests/data_validation/ClassImbalance.py +1 -1
- validmind/tests/data_validation/DatasetDescription.py +1 -1
- validmind/tests/data_validation/DickeyFullerGLS.py +1 -1
- validmind/tests/data_validation/FeatureTargetCorrelationPlot.py +1 -1
- validmind/tests/data_validation/HighCardinality.py +5 -1
- validmind/tests/data_validation/HighPearsonCorrelation.py +1 -1
- validmind/tests/data_validation/IQROutliersBarPlot.py +5 -3
- validmind/tests/data_validation/IQROutliersTable.py +5 -2
- validmind/tests/data_validation/IsolationForestOutliers.py +5 -4
- validmind/tests/data_validation/JarqueBera.py +2 -2
- validmind/tests/data_validation/LJungBox.py +2 -2
- validmind/tests/data_validation/LaggedCorrelationHeatmap.py +1 -1
- validmind/tests/data_validation/MissingValues.py +14 -10
- validmind/tests/data_validation/MissingValuesBarPlot.py +3 -1
- validmind/tests/data_validation/MutualInformation.py +2 -1
- validmind/tests/data_validation/PearsonCorrelationMatrix.py +1 -1
- validmind/tests/data_validation/ProtectedClassesCombination.py +2 -0
- validmind/tests/data_validation/ProtectedClassesDescription.py +2 -2
- validmind/tests/data_validation/ProtectedClassesDisparity.py +9 -5
- validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py +10 -2
- validmind/tests/data_validation/RollingStatsPlot.py +2 -1
- validmind/tests/data_validation/ScoreBandDefaultRates.py +4 -2
- validmind/tests/data_validation/SeasonalDecompose.py +1 -1
- validmind/tests/data_validation/ShapiroWilk.py +2 -2
- validmind/tests/data_validation/SpreadPlot.py +1 -1
- validmind/tests/data_validation/TabularCategoricalBarPlots.py +1 -1
- validmind/tests/data_validation/TabularDateTimeHistograms.py +1 -1
- validmind/tests/data_validation/TargetRateBarPlots.py +4 -1
- validmind/tests/data_validation/TimeSeriesFrequency.py +1 -1
- validmind/tests/data_validation/TimeSeriesOutliers.py +7 -2
- validmind/tests/data_validation/WOEBinPlots.py +1 -1
- validmind/tests/data_validation/WOEBinTable.py +1 -1
- validmind/tests/data_validation/ZivotAndrewsArch.py +5 -2
- validmind/tests/data_validation/nlp/CommonWords.py +1 -1
- validmind/tests/data_validation/nlp/Hashtags.py +1 -1
- validmind/tests/data_validation/nlp/LanguageDetection.py +1 -1
- validmind/tests/data_validation/nlp/Mentions.py +1 -1
- validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py +5 -1
- validmind/tests/data_validation/nlp/Punctuations.py +1 -1
- validmind/tests/data_validation/nlp/Sentiment.py +3 -1
- validmind/tests/data_validation/nlp/TextDescription.py +1 -1
- validmind/tests/data_validation/nlp/Toxicity.py +1 -1
- validmind/tests/model_validation/BertScore.py +7 -1
- validmind/tests/model_validation/BleuScore.py +7 -1
- validmind/tests/model_validation/ClusterSizeDistribution.py +3 -1
- validmind/tests/model_validation/ContextualRecall.py +9 -1
- validmind/tests/model_validation/FeaturesAUC.py +1 -1
- validmind/tests/model_validation/MeteorScore.py +7 -1
- validmind/tests/model_validation/ModelPredictionResiduals.py +5 -1
- validmind/tests/model_validation/RegardScore.py +6 -1
- validmind/tests/model_validation/RegressionResidualsPlot.py +10 -1
- validmind/tests/model_validation/RougeScore.py +3 -1
- validmind/tests/model_validation/TimeSeriesPredictionWithCI.py +2 -0
- validmind/tests/model_validation/TimeSeriesPredictionsPlot.py +10 -2
- validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py +6 -2
- validmind/tests/model_validation/TokenDisparity.py +5 -1
- validmind/tests/model_validation/ToxicityScore.py +2 -0
- validmind/tests/model_validation/embeddings/ClusterDistribution.py +1 -1
- validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py +5 -1
- validmind/tests/model_validation/embeddings/CosineSimilarityDistribution.py +5 -1
- validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py +5 -1
- validmind/tests/model_validation/embeddings/DescriptiveAnalytics.py +2 -0
- validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py +5 -1
- validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py +6 -2
- validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py +3 -1
- validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py +4 -1
- validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py +5 -1
- validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py +5 -1
- validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py +5 -1
- validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py +5 -1
- validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py +6 -1
- validmind/tests/model_validation/ragas/AnswerCorrectness.py +1 -1
- validmind/tests/model_validation/ragas/AspectCritic.py +4 -1
- validmind/tests/model_validation/ragas/ContextEntityRecall.py +1 -1
- validmind/tests/model_validation/ragas/ContextPrecision.py +1 -1
- validmind/tests/model_validation/ragas/ContextPrecisionWithoutReference.py +1 -1
- validmind/tests/model_validation/ragas/ContextRecall.py +1 -1
- validmind/tests/model_validation/ragas/Faithfulness.py +1 -1
- validmind/tests/model_validation/ragas/NoiseSensitivity.py +1 -1
- validmind/tests/model_validation/ragas/ResponseRelevancy.py +1 -1
- validmind/tests/model_validation/ragas/SemanticSimilarity.py +1 -1
- validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py +9 -9
- validmind/tests/model_validation/sklearn/AdjustedRandIndex.py +9 -9
- validmind/tests/model_validation/sklearn/CalibrationCurve.py +5 -2
- validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py +15 -2
- validmind/tests/model_validation/sklearn/ClusterCosineSimilarity.py +5 -1
- validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py +24 -14
- validmind/tests/model_validation/sklearn/CompletenessScore.py +8 -9
- validmind/tests/model_validation/sklearn/ConfusionMatrix.py +22 -3
- validmind/tests/model_validation/sklearn/FeatureImportance.py +6 -2
- validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py +12 -9
- validmind/tests/model_validation/sklearn/HomogeneityScore.py +14 -9
- validmind/tests/model_validation/sklearn/HyperParametersTuning.py +4 -2
- validmind/tests/model_validation/sklearn/KMeansClustersOptimization.py +6 -1
- validmind/tests/model_validation/sklearn/MinimumAccuracy.py +12 -7
- validmind/tests/model_validation/sklearn/MinimumF1Score.py +12 -7
- validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py +21 -6
- validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +8 -2
- validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py +5 -1
- validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py +5 -1
- validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py +6 -1
- validmind/tests/model_validation/sklearn/ROCCurve.py +3 -1
- validmind/tests/model_validation/sklearn/RegressionErrors.py +6 -2
- validmind/tests/model_validation/sklearn/RegressionPerformance.py +13 -8
- validmind/tests/model_validation/sklearn/RegressionR2Square.py +8 -5
- validmind/tests/model_validation/sklearn/RobustnessDiagnosis.py +5 -1
- validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +6 -1
- validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py +10 -2
- validmind/tests/model_validation/sklearn/SilhouettePlot.py +5 -1
- validmind/tests/model_validation/sklearn/VMeasure.py +12 -9
- validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py +5 -1
- validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py +6 -1
- validmind/tests/model_validation/statsmodels/GINITable.py +8 -1
- validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py +2 -2
- validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py +6 -2
- validmind/tests/model_validation/statsmodels/RegressionCoeffs.py +8 -2
- validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py +3 -1
- validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py +7 -2
- validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py +2 -0
- validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py +2 -0
- validmind/tests/model_validation/statsmodels/RegressionModelSummary.py +4 -2
- validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py +3 -1
- validmind/tests/ongoing_monitoring/CalibrationCurveDrift.py +11 -1
- validmind/tests/ongoing_monitoring/ClassificationAccuracyDrift.py +10 -2
- validmind/tests/ongoing_monitoring/ConfusionMatrixDrift.py +8 -1
- validmind/tests/ongoing_monitoring/CumulativePredictionProbabilitiesDrift.py +18 -2
- validmind/tests/ongoing_monitoring/FeatureDrift.py +9 -2
- validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py +8 -2
- validmind/tests/ongoing_monitoring/PredictionCorrelation.py +13 -2
- validmind/tests/ongoing_monitoring/PredictionProbabilitiesHistogramDrift.py +13 -2
- validmind/tests/ongoing_monitoring/ROCCurveDrift.py +16 -2
- validmind/tests/ongoing_monitoring/ScoreBandsDrift.py +11 -2
- validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py +13 -2
- validmind/tests/prompt_validation/Clarity.py +1 -1
- validmind/tests/prompt_validation/NegativeInstruction.py +1 -1
- validmind/tests/prompt_validation/Robustness.py +6 -1
- validmind/tests/prompt_validation/Specificity.py +1 -1
- validmind/vm_models/result/utils.py +4 -23
- {validmind-2.8.10.dist-info → validmind-2.8.12.dist-info}/METADATA +2 -2
- {validmind-2.8.10.dist-info → validmind-2.8.12.dist-info}/RECORD +149 -149
- {validmind-2.8.10.dist-info → validmind-2.8.12.dist-info}/LICENSE +0 -0
- {validmind-2.8.10.dist-info → validmind-2.8.12.dist-info}/WHEEL +0 -0
- {validmind-2.8.10.dist-info → validmind-2.8.12.dist-info}/entry_points.txt +0 -0
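
The dominant change across these files is mechanical and repeated: each test module swaps `from validmind import tags, tasks` for `from validmind import RawData, tags, tasks`, and each test function appends a `RawData` object — bundling its intermediate values with the `input_id`s of the model and dataset(s) involved — to its existing return value. Below is a minimal sketch of the pattern as it recurs in the hunks that follow; the `MyMetric` test, its tags, and the `dataset.y`/`dataset.y_pred(model)` accessors are illustrative assumptions, not code from this diff:

```python
# Hypothetical test showing the recurring 2.8.12 pattern; only the RawData
# import and the trailing RawData(...) return value mirror the hunks below.
from sklearn.metrics import accuracy_score

from validmind import RawData, tags, tasks
from validmind.vm_models import VMDataset, VMModel


@tags("classification")
@tasks("classification")
def MyMetric(dataset: VMDataset, model: VMModel):
    y_true = dataset.y              # ground truth (assumed accessor)
    y_pred = dataset.y_pred(model)  # cached predictions (assumed accessor)

    # Existing output (a table) plus the new RawData payload
    return {"Accuracy": accuracy_score(y_true, y_pred)}, RawData(
        y_true=y_true,
        y_pred=y_pred,
        model=model.input_id,
        dataset=dataset.input_id,
    )
```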

validmind/tests/model_validation/statsmodels/RegressionModelSummary.py
@@ -4,7 +4,7 @@
 
 from sklearn.metrics import mean_squared_error, r2_score
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.vm_models import VMDataset, VMModel
 
 from .statsutils import adj_r2_score
@@ -58,4 +58,6 @@ def RegressionModelSummary(dataset: VMDataset, model: VMModel):
             "MSE": mean_squared_error(y_true, y_pred, squared=True),
             "RMSE": mean_squared_error(y_true, y_pred, squared=False),
         }
-    ]
+    ], RawData(
+        y_true=y_true, y_pred=y_pred, model=model.input_id, dataset=dataset.input_id
+    )
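
For context on the metrics feeding this summary: `mean_squared_error(..., squared=False)` is RMSE. A standalone sketch of the same computation on toy data; note that newer scikit-learn releases deprecate the `squared=` flag in favor of `root_mean_squared_error`, so this pin-era form may warn or fail on recent versions:

```python
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

y_true = np.array([3.0, 5.0, 2.5, 7.0])
y_pred = np.array([2.8, 5.4, 2.9, 6.4])

print({
    "R2": r2_score(y_true, y_pred),
    "MSE": mean_squared_error(y_true, y_pred, squared=True),
    "RMSE": mean_squared_error(y_true, y_pred, squared=False),  # sqrt of MSE
})
```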

validmind/tests/ongoing_monitoring/CalibrationCurveDrift.py
@@ -9,7 +9,7 @@ import pandas as pd
 import plotly.graph_objects as go
 from sklearn.calibration import calibration_curve
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.errors import SkipTestError
 from validmind.vm_models import VMDataset, VMModel
 
@@ -217,4 +217,14 @@ def CalibrationCurveDrift(
         fig,
         {"Mean Predicted Probabilities": pred_df, "Fraction of Positives": true_df},
         pass_fail_bool,
+        RawData(
+            prob_true_ref=prob_true_ref,
+            prob_pred_ref=prob_pred_ref,
+            prob_true_mon=prob_true_mon,
+            prob_pred_mon=prob_pred_mon,
+            bin_labels=bin_labels,
+            model=model.input_id,
+            dataset_ref=datasets[0].input_id,
+            dataset_mon=datasets[1].input_id,
+        ),
     )
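
The new raw-data fields hold the outputs of scikit-learn's `calibration_curve` for the reference and monitoring datasets. A minimal sketch of how such arrays are produced, on synthetic data; the test's own bin count and labels may differ:

```python
import numpy as np
from sklearn.calibration import calibration_curve

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_prob = np.array([0.1, 0.3, 0.7, 0.8, 0.6, 0.4, 0.9, 0.2])

# prob_true: observed fraction of positives per bin
# prob_pred: mean predicted probability per bin
prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=4)
print(prob_true, prob_pred)  # [0. 0. 1. 1.] [0.15 0.35 0.65 0.85]
```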

validmind/tests/ongoing_monitoring/ClassificationAccuracyDrift.py
@@ -8,7 +8,7 @@ import numpy as np
 import pandas as pd
 from sklearn.metrics import classification_report
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.vm_models import VMDataset, VMModel
 
 
@@ -145,4 +145,12 @@ def ClassificationAccuracyDrift(
     # Calculate overall pass/fail
     pass_fail_bool = (df["Pass/Fail"] == "Pass").all()
 
-
+    raw_data = RawData(
+        report_reference=report_ref,
+        report_monitoring=report_mon,
+        model=model.input_id,
+        dataset_reference=datasets[0].input_id,
+        dataset_monitoring=datasets[1].input_id,
+    )
+
+    return ({"Classification Accuracy Metrics": df}, pass_fail_bool, raw_data)

validmind/tests/ongoing_monitoring/ConfusionMatrixDrift.py
@@ -8,7 +8,7 @@ import numpy as np
 import pandas as pd
 from sklearn.metrics import confusion_matrix
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.vm_models import VMDataset, VMModel
 
 
@@ -190,4 +190,11 @@ def ConfusionMatrixDrift(
     return (
         {"Confusion Matrix Metrics": metrics_df, "Sample Counts": counts_df},
         pass_fail_bool,
+        RawData(
+            confusion_matrix_reference=cm_ref,
+            confusion_matrix_monitoring=cm_mon,
+            model=model.input_id,
+            dataset_reference=datasets[0].input_id,
+            dataset_monitoring=datasets[1].input_id,
+        ),
     )

validmind/tests/ongoing_monitoring/CumulativePredictionProbabilitiesDrift.py
@@ -8,7 +8,7 @@ import numpy as np
 import plotly.graph_objects as go
 from plotly.subplots import make_subplots
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.vm_models import VMDataset, VMModel
 
 
@@ -83,6 +83,7 @@ def CumulativePredictionProbabilitiesDrift(
     diff_color = "rgba(148, 103, 189, 0.8)"  # Purple with 0.8 opacity
 
     figures = []
+    raw_data = {}
     for class_value in classes:
         # Create figure with secondary y-axis
         fig = make_subplots(
@@ -175,4 +176,19 @@ def CumulativePredictionProbabilitiesDrift(
 
         figures.append(fig)
 
-
+        # Store raw data for current class
+        raw_data[f"class_{class_value}_ref_probs"] = ref_probs
+        raw_data[f"class_{class_value}_mon_probs"] = mon_probs
+        raw_data[f"class_{class_value}_ref_sorted"] = ref_sorted
+        raw_data[f"class_{class_value}_ref_cumsum"] = ref_cumsum
+        raw_data[f"class_{class_value}_mon_sorted"] = mon_sorted
+        raw_data[f"class_{class_value}_mon_cumsum"] = mon_cumsum
+
+    return tuple(figures) + (
+        RawData(
+            model=model.input_id,
+            dataset_reference=datasets[0].input_id,
+            dataset_monitoring=datasets[1].input_id,
+            **raw_data,
+        ),
+    )

validmind/tests/ongoing_monitoring/FeatureDrift.py
@@ -6,7 +6,7 @@ import numpy as np
 import pandas as pd
 import plotly.graph_objects as go
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 
 
 def calculate_psi_score(actual, expected):
@@ -183,4 +183,11 @@ def FeatureDrift(
     # Calculate overall pass/fail
     pass_fail_bool = (psi_df["Pass/Fail"] == "Pass").all()
 
-
+    # Prepare raw data
+    raw_data = RawData(
+        distributions=distributions,
+        dataset_reference=datasets[0].input_id,
+        dataset_monitoring=datasets[1].input_id,
+    )
+
+    return ({"PSI Scores": psi_df}, *figures, pass_fail_bool, raw_data)
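
`FeatureDrift` gates its pass/fail on per-feature PSI scores computed by the module's `calculate_psi_score`. For reference, a generic sketch of the standard population stability index over matched bin proportions; the module's own implementation may differ in binning and zero-handling:

```python
import numpy as np


def psi(expected: np.ndarray, actual: np.ndarray, eps: float = 1e-4) -> float:
    """Standard PSI: sum over bins of (actual - expected) * ln(actual / expected)."""
    e = np.clip(expected, eps, None)
    a = np.clip(actual, eps, None)
    return float(np.sum((a - e) * np.log(a / e)))


# A mild shift between reference and monitoring bin proportions
print(psi(np.array([0.25, 0.25, 0.25, 0.25]),
          np.array([0.30, 0.25, 0.25, 0.20])))  # ~0.02, under the usual 0.1 cutoff
```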

validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py
@@ -5,7 +5,7 @@
 
 import matplotlib.pyplot as plt
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 
 
 @tags("visualization")
@@ -74,4 +74,10 @@ def PredictionAcrossEachFeature(datasets, model):
         figures_to_save.append(fig)
         plt.close()
 
-    return tuple(figures_to_save)
+    return tuple(figures_to_save), RawData(
+        y_prob_reference=y_prob_reference,
+        y_prob_monitoring=y_prob_monitoring,
+        model=model.input_id,
+        dataset_reference=datasets[0].input_id,
+        dataset_monitoring=datasets[1].input_id,
+    )

validmind/tests/ongoing_monitoring/PredictionCorrelation.py
@@ -5,7 +5,7 @@
 import pandas as pd
 import plotly.graph_objects as go
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 
 
 @tags("visualization")
@@ -140,4 +140,15 @@ def PredictionCorrelation(datasets, model, drift_pct_threshold=20):
     # Calculate overall pass/fail
     pass_fail_bool = (corr_final["Pass/Fail"] == "Pass").all()
 
-    return (
+    return (
+        {"Correlation Pair Table": corr_final},
+        fig,
+        pass_fail_bool,
+        RawData(
+            reference_correlations=corr_ref.to_dict(),
+            monitoring_correlations=corr_mon.to_dict(),
+            model=model.input_id,
+            dataset_reference=datasets[0].input_id,
+            dataset_monitoring=datasets[1].input_id,
+        ),
+    )

validmind/tests/ongoing_monitoring/PredictionProbabilitiesHistogramDrift.py
@@ -10,7 +10,7 @@ import plotly.graph_objects as go
 from plotly.subplots import make_subplots
 from scipy import stats
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.vm_models import VMDataset, VMModel
 
 
@@ -201,4 +201,15 @@ def PredictionProbabilitiesHistogramDrift(
         }
     )
 
-    return
+    return (
+        fig,
+        tables,
+        all_passed,
+        RawData(
+            reference_probabilities=y_prob_ref,
+            monitoring_probabilities=y_prob_mon,
+            model=model.input_id,
+            dataset_reference=datasets[0].input_id,
+            dataset_monitoring=datasets[1].input_id,
+        ),
+    )

validmind/tests/ongoing_monitoring/ROCCurveDrift.py
@@ -8,7 +8,7 @@ import numpy as np
 import plotly.graph_objects as go
 from sklearn.metrics import roc_auc_score, roc_curve
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.errors import SkipTestError
 from validmind.vm_models import VMDataset, VMModel
 
@@ -147,4 +147,18 @@ def ROCCurveDrift(datasets: List[VMDataset], model: VMModel):
         height=500,
     )
 
-    return
+    return (
+        fig1,
+        fig2,
+        RawData(
+            fpr_ref=fpr_ref,
+            tpr_ref=tpr_ref,
+            auc_ref=auc_ref,
+            fpr_mon=fpr_mon,
+            tpr_mon=tpr_mon,
+            auc_mon=auc_mon,
+            model=model.input_id,
+            dataset_reference=datasets[0].input_id,
+            dataset_monitoring=datasets[1].input_id,
+        ),
+    )

validmind/tests/ongoing_monitoring/ScoreBandsDrift.py
@@ -7,7 +7,7 @@ from typing import List
 import numpy as np
 import pandas as pd
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 from validmind.vm_models import VMDataset, VMModel
 
 
@@ -209,4 +209,13 @@ def ScoreBandsDrift(
         tables[table_name] = pd.DataFrame(rows)
         all_passed &= metric_passed
 
-
+    # Collect raw data
+    raw_data = RawData(
+        ref_results=ref_results,
+        mon_results=mon_results,
+        model=model.input_id,
+        dataset_reference=datasets[0].input_id,
+        dataset_monitoring=datasets[1].input_id,
+    )
+
+    return tables, all_passed, raw_data

validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py
@@ -7,7 +7,7 @@ import plotly.figure_factory as ff
 import plotly.graph_objects as go
 from scipy.stats import kurtosis, skew
 
-from validmind import tags, tasks
+from validmind import RawData, tags, tasks
 
 
 @tags("visualization")
@@ -142,4 +142,15 @@ def TargetPredictionDistributionPlot(datasets, model, drift_pct_threshold=20):
 
     pass_fail_bool = (moments["Pass/Fail"] == "Pass").all()
 
-    return (
+    return (
+        {"Distribution Moments": moments},
+        fig,
+        pass_fail_bool,
+        RawData(
+            pred_ref=pred_ref,
+            pred_monitor=pred_monitor,
+            model=model.input_id,
+            dataset_reference=datasets[0].input_id,
+            dataset_monitoring=datasets[1].input_id,
+        ),
+    )

validmind/tests/prompt_validation/Robustness.py
@@ -130,5 +130,10 @@ def Robustness(model, dataset, num_tests=10):
     return (
         results,
         all(result["Pass/Fail"] == "Pass" for result in results),
-        RawData(
+        RawData(
+            generated_inputs=generated_inputs,
+            responses=responses,
+            model=model.input_id,
+            dataset=dataset.input_id,
+        ),
     )

validmind/vm_models/result/utils.py
@@ -44,31 +44,12 @@ async def update_metadata(content_id: str, text: str, _json: Union[Dict, List] =
     content_id = parts[0]
     revision_name = parts[1] if len(parts) > 1 else None
 
-
-
-
-    # if we are updating a metric or test description, we check if the text
-    # has changed from the last time it was logged, and only update if it has
-    if content_id.split(":", 1)[0] in ["metric_description", "test_description"]:
-        try:
-            md = await api_client.aget_metadata(content_id)
-            # if there is an existing description, only update it if the new one
-            # is different and is an AI-generated description
-            should_update = (
-                md["text"] != text if revision_name == AI_REVISION_NAME else False
-            )
-            logger.debug(f"Check if description has changed: {should_update}")
-        except Exception:
-            # if exception, assume its not created yet TODO: don't catch all
-            should_update = True
-
-    if should_update:
-        if revision_name:
-            content_id = f"{content_id}::{revision_name}"
+    if revision_name:
+        content_id = f"{content_id}::{revision_name}"
 
-        logger.debug(f"Updating metadata for `{content_id}`")
+    logger.debug(f"Updating metadata for `{content_id}`")
 
-        await api_client.alog_metadata(content_id, text, _json)
+    await api_client.alog_metadata(content_id, text, _json)
 
 
 def check_for_sensitive_data(data: pd.DataFrame, inputs: List[VMInput]):
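
The net effect of this hunk: `update_metadata` no longer fetches the existing description to decide whether to re-log — it now always logs the (possibly revision-qualified) content. Assembled from the added lines above into a single sketch; `logger`, `api_client`, and the `"::"` split convention come from the surrounding module and are assumptions here:

```python
# Sketch of update_metadata after the change, assembled from the added lines
# above; not a verbatim copy of validmind/vm_models/result/utils.py.
async def update_metadata(content_id: str, text: str, _json=None) -> None:
    parts = content_id.split("::")  # separator inferred from the f-string below
    content_id = parts[0]
    revision_name = parts[1] if len(parts) > 1 else None

    if revision_name:
        content_id = f"{content_id}::{revision_name}"

    logger.debug(f"Updating metadata for `{content_id}`")  # module-level logger

    await api_client.alog_metadata(content_id, text, _json)  # always log now
```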

{validmind-2.8.10.dist-info → validmind-2.8.12.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: validmind
-Version: 2.8.10
+Version: 2.8.12
 Summary: ValidMind Library
 License: Commercial License
 Author: Andres Rodriguez
@@ -35,7 +35,7 @@ Requires-Dist: numba (<0.59.0)
 Requires-Dist: numpy
 Requires-Dist: openai (>=1)
 Requires-Dist: pandas (>=1.1,<=2.0.3)
-Requires-Dist: plotly
+Requires-Dist: plotly (<6.0.0)
 Requires-Dist: plotly-express
 Requires-Dist: polars
 Requires-Dist: pycocoevalcap (>=1.2,<2.0) ; extra == "all" or extra == "llm"
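
Besides the version bump, the only dependency change is a new `<6.0.0` upper bound on plotly. A quick way to confirm an installed environment satisfies the new pin; this sketch assumes the `packaging` distribution is available, which is common but not guaranteed:

```python
from importlib.metadata import version

from packaging.specifiers import SpecifierSet

installed = version("plotly")
print(f"plotly {installed} satisfies '<6.0.0':", installed in SpecifierSet("<6.0.0"))
```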