validmind 2.0.1__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (127)
  1. validmind/__init__.py +6 -3
  2. validmind/__version__.py +1 -1
  3. validmind/ai.py +193 -0
  4. validmind/api_client.py +45 -31
  5. validmind/client.py +33 -6
  6. validmind/datasets/classification/customer_churn.py +2 -2
  7. validmind/datasets/credit_risk/__init__.py +11 -0
  8. validmind/datasets/credit_risk/datasets/lending_club_loan_data_2007_2014_clean.csv.gz +0 -0
  9. validmind/datasets/credit_risk/lending_club.py +394 -0
  10. validmind/datasets/nlp/__init__.py +5 -0
  11. validmind/datasets/nlp/cnn_dailymail.py +98 -0
  12. validmind/datasets/nlp/datasets/cnn_dailymail_100_with_predictions.csv +255 -0
  13. validmind/datasets/nlp/datasets/cnn_dailymail_500_with_predictions.csv +1277 -0
  14. validmind/datasets/nlp/datasets/sentiments_with_predictions.csv +4847 -0
  15. validmind/errors.py +11 -1
  16. validmind/logging.py +9 -2
  17. validmind/models/huggingface.py +2 -2
  18. validmind/models/pytorch.py +3 -3
  19. validmind/models/sklearn.py +4 -4
  20. validmind/template.py +2 -2
  21. validmind/test_suites/__init__.py +4 -2
  22. validmind/tests/__init__.py +130 -45
  23. validmind/tests/data_validation/DatasetDescription.py +0 -1
  24. validmind/tests/data_validation/FeatureTargetCorrelationPlot.py +3 -1
  25. validmind/tests/data_validation/PiTCreditScoresHistogram.py +1 -1
  26. validmind/tests/data_validation/ScatterPlot.py +8 -2
  27. validmind/tests/data_validation/nlp/StopWords.py +1 -6
  28. validmind/tests/data_validation/nlp/TextDescription.py +20 -9
  29. validmind/tests/decorator.py +313 -0
  30. validmind/tests/model_validation/BertScore.py +1 -1
  31. validmind/tests/model_validation/BertScoreAggregate.py +1 -1
  32. validmind/tests/model_validation/BleuScore.py +1 -1
  33. validmind/tests/model_validation/ClusterSizeDistribution.py +1 -1
  34. validmind/tests/model_validation/ContextualRecall.py +1 -1
  35. validmind/tests/model_validation/FeaturesAUC.py +110 -0
  36. validmind/tests/model_validation/MeteorScore.py +92 -0
  37. validmind/tests/model_validation/RegardHistogram.py +6 -7
  38. validmind/tests/model_validation/RegardScore.py +4 -6
  39. validmind/tests/model_validation/RegressionResidualsPlot.py +127 -0
  40. validmind/tests/model_validation/RougeMetrics.py +7 -5
  41. validmind/tests/model_validation/RougeMetricsAggregate.py +1 -1
  42. validmind/tests/model_validation/SelfCheckNLIScore.py +112 -0
  43. validmind/tests/model_validation/TokenDisparity.py +1 -1
  44. validmind/tests/model_validation/ToxicityHistogram.py +1 -1
  45. validmind/tests/model_validation/ToxicityScore.py +1 -1
  46. validmind/tests/model_validation/embeddings/ClusterDistribution.py +1 -1
  47. validmind/tests/model_validation/embeddings/CosineSimilarityDistribution.py +1 -3
  48. validmind/tests/model_validation/embeddings/DescriptiveAnalytics.py +17 -22
  49. validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py +1 -1
  50. validmind/tests/model_validation/sklearn/ClassifierPerformance.py +16 -17
  51. validmind/tests/model_validation/sklearn/ClusterCosineSimilarity.py +1 -1
  52. validmind/tests/model_validation/sklearn/ClusterPerformance.py +2 -2
  53. validmind/tests/model_validation/sklearn/ConfusionMatrix.py +21 -3
  54. validmind/tests/model_validation/sklearn/MinimumAccuracy.py +1 -1
  55. validmind/tests/model_validation/sklearn/MinimumF1Score.py +1 -1
  56. validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py +1 -1
  57. validmind/tests/model_validation/sklearn/ModelsPerformanceComparison.py +5 -4
  58. validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +2 -2
  59. validmind/tests/model_validation/sklearn/ROCCurve.py +6 -12
  60. validmind/tests/model_validation/sklearn/RegressionErrors.py +2 -2
  61. validmind/tests/model_validation/sklearn/RegressionModelsPerformanceComparison.py +6 -4
  62. validmind/tests/model_validation/sklearn/RegressionR2Square.py +2 -2
  63. validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +55 -5
  64. validmind/tests/model_validation/sklearn/SilhouettePlot.py +1 -1
  65. validmind/tests/model_validation/sklearn/TrainingTestDegradation.py +11 -5
  66. validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py +2 -2
  67. validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py +140 -0
  68. validmind/tests/model_validation/statsmodels/GINITable.py +22 -45
  69. validmind/tests/model_validation/statsmodels/{LogisticRegPredictionHistogram.py → PredictionProbabilitiesHistogram.py} +67 -92
  70. validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py +2 -2
  71. validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py +2 -2
  72. validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py +1 -1
  73. validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py +1 -1
  74. validmind/tests/model_validation/statsmodels/RegressionModelSummary.py +1 -1
  75. validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py +2 -2
  76. validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py +128 -0
  77. validmind/tests/model_validation/statsmodels/ScorecardHistogram.py +70 -103
  78. validmind/tests/prompt_validation/ai_powered_test.py +2 -0
  79. validmind/tests/test_providers.py +14 -124
  80. validmind/unit_metrics/__init__.py +75 -70
  81. validmind/unit_metrics/classification/sklearn/Accuracy.py +14 -0
  82. validmind/unit_metrics/classification/sklearn/F1.py +13 -0
  83. validmind/unit_metrics/classification/sklearn/Precision.py +13 -0
  84. validmind/unit_metrics/classification/sklearn/ROC_AUC.py +13 -0
  85. validmind/unit_metrics/classification/sklearn/Recall.py +13 -0
  86. validmind/unit_metrics/composite.py +228 -0
  87. validmind/unit_metrics/regression/GiniCoefficient.py +33 -0
  88. validmind/unit_metrics/regression/HuberLoss.py +23 -0
  89. validmind/unit_metrics/regression/KolmogorovSmirnovStatistic.py +30 -0
  90. validmind/unit_metrics/regression/MeanAbsolutePercentageError.py +16 -0
  91. validmind/unit_metrics/regression/MeanBiasDeviation.py +13 -0
  92. validmind/unit_metrics/regression/QuantileLoss.py +15 -0
  93. validmind/unit_metrics/regression/sklearn/AdjustedRSquaredScore.py +21 -0
  94. validmind/unit_metrics/regression/sklearn/MeanAbsoluteError.py +13 -0
  95. validmind/unit_metrics/regression/sklearn/MeanSquaredError.py +13 -0
  96. validmind/unit_metrics/regression/sklearn/RSquaredScore.py +13 -0
  97. validmind/unit_metrics/regression/sklearn/RootMeanSquaredError.py +20 -0
  98. validmind/utils.py +20 -31
  99. validmind/vm_models/__init__.py +0 -2
  100. validmind/vm_models/dataset.py +623 -29
  101. validmind/vm_models/figure.py +52 -17
  102. validmind/vm_models/test/metric.py +33 -31
  103. validmind/vm_models/test/output_template.py +0 -27
  104. validmind/vm_models/test/result_wrapper.py +68 -36
  105. validmind/vm_models/test/test.py +4 -2
  106. validmind/vm_models/test/threshold_test.py +24 -14
  107. validmind/vm_models/test_context.py +7 -0
  108. validmind/vm_models/test_suite/runner.py +1 -1
  109. validmind/vm_models/test_suite/summary.py +3 -3
  110. validmind/vm_models/test_suite/test.py +1 -1
  111. validmind/vm_models/test_suite/test_suite.py +2 -1
  112. {validmind-2.0.1.dist-info → validmind-2.1.0.dist-info}/METADATA +18 -18
  113. {validmind-2.0.1.dist-info → validmind-2.1.0.dist-info}/RECORD +116 -94
  114. validmind-2.1.0.dist-info/entry_points.txt +3 -0
  115. validmind/tests/__types__.py +0 -62
  116. validmind/tests/model_validation/statsmodels/LogRegressionConfusionMatrix.py +0 -128
  117. validmind/tests/model_validation/statsmodels/LogisticRegCumulativeProb.py +0 -172
  118. validmind/tests/model_validation/statsmodels/ScorecardBucketHistogram.py +0 -181
  119. validmind/tests/model_validation/statsmodels/ScorecardProbabilitiesHistogram.py +0 -175
  120. validmind/unit_metrics/sklearn/classification/Accuracy.py +0 -20
  121. validmind/unit_metrics/sklearn/classification/F1.py +0 -22
  122. validmind/unit_metrics/sklearn/classification/Precision.py +0 -22
  123. validmind/unit_metrics/sklearn/classification/ROC_AUC.py +0 -20
  124. validmind/unit_metrics/sklearn/classification/Recall.py +0 -20
  125. validmind/vm_models/test/unit_metric.py +0 -88
  126. {validmind-2.0.1.dist-info → validmind-2.1.0.dist-info}/LICENSE +0 -0
  127. {validmind-2.0.1.dist-info → validmind-2.1.0.dist-info}/WHEEL +0 -0
--- a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py
+++ b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py
@@ -103,8 +103,8 @@ class RegressionModelForecastPlotLevels(Metric):
         train_ds = datasets[0]
         test_ds = datasets[1]

-        y_pred = train_ds.y_pred(fitted_model.input_id)
-        y_pred_test = test_ds.y_pred(fitted_model.input_id)
+        y_pred = train_ds.y_pred(fitted_model)
+        y_pred_test = test_ds.y_pred(fitted_model)

         all_dates = pd.concat([pd.Series(train_ds.index), pd.Series(test_ds.index)])

--- a/validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py
+++ b/validmind/tests/model_validation/statsmodels/RegressionModelInsampleComparison.py
@@ -108,7 +108,7 @@ class RegressionModelInsampleComparison(Metric):
         for i, model in enumerate(models):
             X_columns = dataset.get_features_columns()
             y_true = dataset.y
-            y_pred = dataset.y_pred(model.model_id)
+            y_pred = dataset.y_pred(model)

             # Extract R-squared and Adjusted R-squared
             r2 = r2_score(y_true, y_pred)
--- a/validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py
+++ b/validmind/tests/model_validation/statsmodels/RegressionModelOutsampleComparison.py
@@ -102,7 +102,7 @@ class RegressionModelOutsampleComparison(Metric):
         y_test = dataset.y

         # Predict the test data
-        y_pred = dataset.y_pred(fitted_model.input_id)
+        y_pred = dataset.y_pred(fitted_model)

         # Calculate the residuals
         residuals = y_test - y_pred
--- a/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py
+++ b/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py
@@ -60,7 +60,7 @@ class RegressionModelSummary(Metric):
         X_columns = self.inputs.dataset.get_features_columns()

         y_true = self.inputs.dataset.y
-        y_pred = self.inputs.dataset.y_pred(self.inputs.model.input_id)
+        y_pred = self.inputs.dataset.y_pred(self.inputs.model)

         r2 = r2_score(y_true, y_pred)
         adj_r2 = adj_r2_score(y_true, y_pred, len(y_true), len(X_columns))
--- a/validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py
+++ b/validmind/tests/model_validation/statsmodels/RegressionModelsPerformance.py
@@ -79,10 +79,10 @@ class RegressionModelsPerformance(Metric):
     def sample_performance_ols(self, models, datasets):
         evaluation_results = []

-        for (model, dataset) in zip(models, datasets):
+        for model, dataset in zip(models, datasets):
             X_columns = dataset.get_features_columns()
             y_true = dataset.y
-            y_pred = dataset.y_pred(model.input_id)
+            y_pred = dataset.y_pred(model)

             # Extract R-squared and Adjusted R-squared
             r2 = r2_score(y_true, y_pred)
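
The recurring change across these regression tests is that `VMDataset.y_pred()` now takes the model object itself instead of its `input_id` string (or the inconsistent `model_id` attribute seen above), letting the dataset resolve the prediction column internally. A minimal sketch of how calling code changes, assuming the standard `init_model`/`init_dataset`/`assign_predictions` helpers from the ValidMind Developer Framework (identifiers are illustrative):

    import validmind as vm

    # Wrap a fitted estimator and a test dataframe
    vm_model = vm.init_model(model, input_id="regression_model")
    vm_test_ds = vm.init_dataset(
        dataset=test_df, input_id="test_ds", target_column="target"
    )

    # Compute and attach predictions for this model
    vm_test_ds.assign_predictions(model=vm_model)

    # 2.0.1 style: look up predictions by the model's input_id string
    # y_pred = vm_test_ds.y_pred(vm_model.input_id)

    # 2.1.0 style: pass the model object; the id is resolved internally
    y_pred = vm_test_ds.y_pred(vm_model)
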
--- /dev/null
+++ b/validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py
@@ -0,0 +1,128 @@
+# Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+# See the LICENSE file in the root of this repository for details.
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+from dataclasses import dataclass
+
+import numpy as np
+import pandas as pd
+import plotly.graph_objects as go
+from sklearn.metrics import r2_score
+from sklearn.utils import check_random_state
+
+from validmind.errors import SkipTestError
+from validmind.logging import get_logger
+from validmind.vm_models import Figure, Metric
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class RegressionPermutationFeatureImportance(Metric):
+    """
+    Assesses the significance of each feature in a model by evaluating the impact on model performance when feature
+    values are randomly rearranged. Specifically designed for use with statsmodels, this metric offers insight into the
+    importance of features based on the decrease in the model's predictive accuracy, typically R².
+
+    **Purpose**: The primary purpose of this metric is to determine which features significantly impact the performance
+    of a regression model developed using statsmodels. The metric measures how much the prediction accuracy deteriorates
+    when each feature's values are permuted.
+
+    **Test Mechanism**: This metric shuffles the values of each feature one at a time in the dataset, computes the model's
+    performance after each permutation, and compares it to the baseline performance. A significant decrease in performance
+    indicates the importance of the feature.
+
+    **Signs of High Risk**:
+    - Significant reliance on a feature that, when permuted, leads to a substantial decrease in performance, suggesting
+    overfitting or high model dependency on that feature.
+    - Features identified as unimportant despite known impacts from domain knowledge, suggesting potential issues in
+    model training or data preprocessing.
+
+    **Strengths**:
+    - Directly assesses the impact of each feature on model performance, providing clear insights into model dependencies.
+    - Model-agnostic within the scope of statsmodels, applicable to any regression model that outputs predictions.
+
+    **Limitations**:
+    - The metric is specific to statsmodels and cannot be used with other types of models without adaptation.
+    - It does not capture interactions between features, which can lead to underestimating the importance of correlated
+    features.
+    - Assumes independence of features when calculating importance, which might not always hold true.
+    """
+
+    name = "regression_pfi"
+    required_inputs = ["model", "dataset"]
+    default_params = {
+        "fontsize": 12,
+        "figure_height": 500,
+    }
+    metadata = {
+        "task_types": ["regression"],
+        "tags": [
+            "statsmodels",
+            "feature_importance",
+            "visualization",
+        ],
+    }
+
+    def run(self):
+        x = self.inputs.dataset.x_df()
+        y = self.inputs.dataset.y_df()
+
+        model = self.inputs.model.model
+        if not hasattr(model, "predict"):
+            raise SkipTestError(
+                "Model does not support 'predict' method required for PFI"
+            )
+
+        # Calculate baseline performance
+        baseline_performance = r2_score(y, model.predict(x))
+        importances = pd.DataFrame(index=x.columns, columns=["Importance", "Std Dev"])
+
+        for column in x.columns:
+            shuffled_scores = []
+            for _ in range(30):  # Default number of shuffles
+                x_shuffled = x.copy()
+                x_shuffled[column] = check_random_state(0).permutation(
+                    x_shuffled[column]
+                )
+                permuted_performance = r2_score(y, model.predict(x_shuffled))
+                shuffled_scores.append(baseline_performance - permuted_performance)
+
+            importances.loc[column] = {
+                "Importance": np.mean(shuffled_scores),
+                "Std Dev": np.std(shuffled_scores),
+            }
+
+        sorted_idx = importances["Importance"].argsort()
+
+        # Plotting the results
+        fig = go.Figure()
+        fig.add_trace(
+            go.Bar(
+                y=importances.index[sorted_idx],
+                x=importances.loc[importances.index[sorted_idx], "Importance"],
+                orientation="h",
+                error_x=dict(
+                    type="data",
+                    array=importances.loc[importances.index[sorted_idx], "Std Dev"],
+                ),
+            )
+        )
+        fig.update_layout(
+            title_text="Permutation Feature Importances",
+            yaxis=dict(
+                tickmode="linear", dtick=1, tickfont=dict(size=self.params["fontsize"])
+            ),
+            height=self.params["figure_height"],
+        )
+
+        return self.cache_results(
+            metric_value=importances.to_dict(),
+            figures=[
+                Figure(
+                    for_object=self,
+                    key="regression_pfi",
+                    figure=fig,
+                ),
+            ],
+        )
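
A quick sketch of invoking this new test through the framework's generic `run_test` entry point. The dotted test ID is inferred from the module path (confirm it against `vm.tests.list_tests()`), and `vm_model`/`vm_dataset` are assumed to be already-initialized ValidMind inputs:

    import validmind as vm

    result = vm.tests.run_test(
        "validmind.model_validation.statsmodels.RegressionPermutationFeatureImportance",
        inputs={"model": vm_model, "dataset": vm_dataset},
        params={"fontsize": 12, "figure_height": 500},
    )
    result.show()
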
--- a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py
+++ b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py
@@ -4,10 +4,8 @@

 from dataclasses import dataclass

-import numpy as np
-import pandas as pd
 import plotly.graph_objects as go
-from plotly.subplots import make_subplots
+from matplotlib import cm

 from validmind.vm_models import Figure, Metric

@@ -53,120 +51,89 @@ class ScorecardHistogram(Metric):
     """

     name = "scorecard_histogram"
-    required_inputs = ["model", "datasets"]
+    required_inputs = ["datasets"]
    metadata = {
         "task_types": ["classification"],
         "tags": ["tabular_data", "visualization", "credit_risk"],
     }
     default_params = {
         "title": "Histogram of Scores",
-        "target_score": 600,
-        "target_odds": 50,
-        "pdo": 20,
+        "score_column": "score",
     }

     @staticmethod
-    def compute_scores(model, X, target_score, target_odds, pdo):
-        X_copy = X.copy()
-        beta = model.model.params.values
-        alpha = model.model.params[0]
-        factor = pdo / np.log(2)
-        offset = target_score - (factor * np.log(target_odds))
-
-        for _, row in X_copy.iterrows():
-            score_i = 0
-            for i in range(1, len(beta)):
-                WoE_i = row[i]
-                score_i += (beta[i] * WoE_i) * factor
-
-            score_i += alpha * factor
-            score_i += offset
-            X_copy.loc[row.name, "score"] = score_i
-
-        return X_copy
-
-    @staticmethod
-    def plot_score_histogram(df_train, df_test, score_col, target_col, title):
-        scores_train_0 = df_train[df_train[target_col] == 0][score_col]
-        scores_train_1 = df_train[df_train[target_col] == 1][score_col]
-        scores_test_0 = df_test[df_test[target_col] == 0][score_col]
-        scores_test_1 = df_test[df_test[target_col] == 1][score_col]
-
-        fig = make_subplots(rows=1, cols=2, subplot_titles=("Train Data", "Test Data"))
-
-        trace_train_0 = go.Histogram(
-            x=scores_train_0, opacity=0.75, name=f"Train {target_col} = 0"
-        )
-        trace_train_1 = go.Histogram(
-            x=scores_train_1, opacity=0.75, name=f"Train {target_col} = 1"
-        )
-        trace_test_0 = go.Histogram(
-            x=scores_test_0, opacity=0.75, name=f"Test {target_col} = 0"
-        )
-        trace_test_1 = go.Histogram(
-            x=scores_test_1, opacity=0.75, name=f"Test {target_col} = 1"
-        )
-
-        fig.add_trace(trace_train_0, row=1, col=1)
-        fig.add_trace(trace_train_1, row=1, col=1)
-        fig.add_trace(trace_test_0, row=1, col=2)
-        fig.add_trace(trace_test_1, row=1, col=2)
-
-        fig.update_layout(barmode="overlay", title_text=title)
-
-        return fig
+    def plot_score_histogram(dataframes, dataset_titles, score_col, target_col, title):
+        figures = []
+        # Generate a colormap and convert to Plotly-accepted color format
+        # Adjust 'viridis' to any other matplotlib colormap if desired
+        colormap = cm.get_cmap("viridis")
+
+        for _, (df, dataset_title) in enumerate(zip(dataframes, dataset_titles)):
+            fig = go.Figure()
+
+            # Get unique classes and assign colors
+            classes = sorted(df[target_col].unique())
+            colors = [
+                colormap(i / len(classes))[:3] for i in range(len(classes))
+            ]  # RGB
+            color_dict = {
+                cls: f"rgb({int(rgb[0]*255)}, {int(rgb[1]*255)}, {int(rgb[2]*255)})"
+                for cls, rgb in zip(classes, colors)
+            }
+
+            for class_value in sorted(df[target_col].unique()):
+                scores_class = df[df[target_col] == class_value][score_col]
+                fig.add_trace(
+                    go.Histogram(
+                        x=scores_class,
+                        opacity=0.75,
+                        name=f"{dataset_title} {target_col} = {class_value}",
+                        marker=dict(
+                            color=color_dict[class_value],
+                        ),
+                    )
+                )
+            fig.update_layout(
+                barmode="overlay",
+                title_text=f"{title} - {dataset_title}",
+                xaxis_title="Score",
+                yaxis_title="Frequency",
+                legend_title=target_col,
+            )
+            figures.append(fig)
+        return figures

     def run(self):
-        model = (
-            self.inputs.model[0]
-            if isinstance(self.inputs.model, list)
-            else self.inputs.model
-        )
-
-        target_column = model.train_ds.target_column
         title = self.params["title"]
-        target_score = self.params["target_score"]
-        target_odds = self.params["target_odds"]
-        pdo = self.params["pdo"]
-
-        # Create a copy of training and testing dataframes
-        df_train = self.inputs.datasets[0].df.copy()
-        df_test = self.inputs.datasets[1].df.copy()
-
-        # Drop target_column to create feature dataframes
-        X_train = df_train.drop(columns=[target_column])
-        X_test = df_test.drop(columns=[target_column])
+        score_column = self.params["score_column"]
+        dataset_titles = [dataset.input_id for dataset in self.inputs.datasets]
+        target_column = self.inputs.datasets[0].target_column
+
+        dataframes = []
+        metric_value = {"score_histogram": {}}
+        for dataset in self.inputs.datasets:
+            df = dataset.df.copy()
+            # Check if the score_column exists in the DataFrame
+            if score_column not in df.columns:
+                raise ValueError(
+                    f"The required column '{score_column}' is not present in the dataset with input_id {dataset.input_id}"
+                )

-        # Subset only target_column to create target dataframes
-        y_train = df_train[[target_column]]
-        y_test = df_test[[target_column]]
+            df[score_column] = dataset.get_extra_column(score_column)
+            dataframes.append(df)
+            metric_value["score_histogram"][dataset.input_id] = list(df[score_column])

-        X_train_scores = self.compute_scores(
-            model, X_train, target_score, target_odds, pdo
-        )
-        X_test_scores = self.compute_scores(
-            model, X_test, target_score, target_odds, pdo
+        figures = self.plot_score_histogram(
+            dataframes, dataset_titles, score_column, target_column, title
         )

-        df_train = pd.concat([X_train_scores, y_train], axis=1)
-        df_test = pd.concat([X_test_scores, y_test], axis=1)
-
-        fig = self.plot_score_histogram(
-            df_train, df_test, "score", target_column, title
-        )
+        figures_list = [
+            Figure(
+                for_object=self,
+                key=f"score_histogram_{title.replace(' ', '_')}_{i+1}",
+                figure=fig,
+            )
+            for i, fig in enumerate(figures)
+        ]

-        return self.cache_results(
-            metric_value={
-                "score_histogram": {
-                    "train_scores": list(X_train_scores["score"]),
-                    "test_scores": list(X_test_scores["score"]),
-                },
-            },
-            figures=[
-                Figure(
-                    for_object=self,
-                    key="score_histogram",
-                    figure=fig,
-                )
-            ],
-        )
+        return self.cache_results(metric_value=metric_value, figures=figures_list)
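
In 2.1.0 the scorecard test no longer derives scores from model coefficients; it expects precomputed scores attached to each dataset and read back via `get_extra_column`. A minimal sketch of the new calling pattern, assuming an `add_extra_column` counterpart on the dataset object (the setter name and variable names are assumptions, not taken from this diff):

    # Attach precomputed scorecard scores to each dataset (setter name assumed)
    vm_train_ds.add_extra_column("score", scores_train)
    vm_test_ds.add_extra_column("score", scores_test)

    vm.tests.run_test(
        "validmind.model_validation.statsmodels.ScorecardHistogram",
        inputs={"datasets": [vm_train_ds, vm_test_ds]},
        params={"score_column": "score"},
    )
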
--- a/validmind/tests/prompt_validation/ai_powered_test.py
+++ b/validmind/tests/prompt_validation/ai_powered_test.py
@@ -57,6 +57,8 @@ class AIPoweredTest:
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": user_prompt},
             ],
+            temperature=0.0,
+            seed=42,
         )
         .choices[0]
         .message.content
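
Pinning `temperature=0.0` and a fixed `seed` makes the LLM-backed prompt-validation tests far more reproducible across runs. A standalone sketch of an equivalent call with the OpenAI Python client (client setup, model choice, and prompt contents are assumptions, not taken from this diff):

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    response = client.chat.completions.create(
        model="gpt-4",  # illustrative model name
        messages=[
            {"role": "system", "content": "You are a strict prompt evaluator."},
            {"role": "user", "content": "Rate this prompt from 1 to 10: ..."},
        ],
        temperature=0.0,  # greedy decoding removes sampling randomness
        seed=42,  # best-effort determinism where the backend supports it
    )
    print(response.choices[0].message.content)
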
--- a/validmind/tests/test_providers.py
+++ b/validmind/tests/test_providers.py
@@ -5,44 +5,30 @@
 import importlib.util
 import os
 import sys
-
-import requests
+from typing import Protocol

 from validmind.logging import get_logger

 logger = get_logger(__name__)


-class GithubTestProviderDownloadError(Exception):
-    """
-    When the remote file can't be downloaded from the repo.
-    """
-
-    pass
-
+class TestProvider(Protocol):
+    """Protocol for user-defined test providers"""

-class GithubTestProviderWriteFileError(Exception):
-    """
-    When the remote file can't be downloaded from the repo.
-    """
-
-    pass
-
-
-class GithubTestProviderLoadModuleError(Exception):
-    """
-    When the remote file was downloaded but the module can't be loaded.
-    """
-
-    pass
+    def load_test(self, test_id: str):
+        """Load the test by test ID

+        Args:
+            test_id (str): The test ID (does not contain the namespace under which
+                the test is registered)

-class GithubTestProviderLoadTestError(Exception):
-    """
-    When the module was loaded but the test class can't be located.
-    """
+        Returns:
+            Test: A test class or function

-    pass
+        Raises:
+            FileNotFoundError: If the test is not found
+        """
+        ...


 class LocalTestProviderLoadModuleError(Exception):
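
With `GithubTestProvider` removed (see the next hunk), any object satisfying the new `TestProvider` protocol can be plugged in. A minimal sketch of a custom provider and its registration; `register_test_provider` is assumed to be the public registration hook, and the folder layout is hypothetical:

    import importlib.util
    import os

    import validmind as vm

    class FolderTestProvider:
        """Loads a test from <root>/<dotted.test.id>.py, mirroring LocalTestProvider."""

        def __init__(self, root_folder: str):
            self.root_folder = root_folder

        def load_test(self, test_id: str):
            # Map the dotted test ID to a file path, e.g. "risk.MyTest" -> risk/MyTest.py
            path = os.path.join(self.root_folder, f"{test_id.replace('.', os.sep)}.py")
            if not os.path.exists(path):
                raise FileNotFoundError(f"No test file found at {path}")
            spec = importlib.util.spec_from_file_location(test_id, path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            # By convention the test object shares the last segment of the test ID
            return getattr(module, test_id.split(".")[-1])

    # Register under a namespace so tests resolve as "my_tests.<test_id>"
    vm.tests.register_test_provider("my_tests", FolderTestProvider("./my_tests"))
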
@@ -61,102 +47,6 @@ class LocalTestProviderLoadTestError(Exception):
     pass


-class GithubTestProvider:
-    """
-    A class used to download python files from a Github repository and
-    dynamically load and execute the tests from those files.
-    """
-
-    BASE_URL = "https://api.github.com/repos"
-
-    def __init__(self, org: str, repo: str, token: str):
-        """
-        Initialize the GithubTestProvider with the given org, repo, and token.
-
-        Args:
-            org (str): The Github organization.
-            repo (str): The Github repository.
-            token (str): The Github access token.
-        """
-        self.org = org
-        self.repo = repo
-        self.token = token
-
-    def _download_file(self, test_path: str) -> str:
-        """
-        Download the file at the given test_path from the Github repository.
-
-        Args:
-            test_path (str): The path of the file in the repository.
-
-        Returns:
-            str: The local file path where the file was downloaded.
-
-        Raises:
-            Exception: If the file can't be downloaded or written.
-        """
-        url = f"{self.BASE_URL}/{self.org}/{self.repo}/contents/{test_path}"
-
-        headers = {
-            "Authorization": f"token {self.token}",
-            "Accept": "application/vnd.github.v3.raw",
-            "X-Github-Api-Version": "2022-11-28",
-        }
-
-        try:
-            response = requests.get(url, headers=headers)
-            response.raise_for_status()
-        except requests.RequestException as e:
-            raise GithubTestProviderDownloadError(
-                f"Failed to download the file at {url}. Error: {str(e)}"
-            )
-
-        file_path = f"/tmp/{os.path.basename(test_path)}"
-        try:
-            with open(file_path, "w") as file:
-                file.write(response.text)
-        except IOError as e:
-            raise GithubTestProviderWriteFileError(
-                f"Failed to write the file to {file_path}. Error: {str(e)}"
-            )
-
-        return file_path
-
-    def load_test(self, test_id):
-        """
-        Load the test identified by the given test_id.
-
-        Args:
-            test_id (str): The identifier of the test. This corresponds to the
-                relative path of the python file in the repository, with slashes replaced by dots.
-
-        Returns:
-            The test class that matches the last part of the test_id.
-
-        Raises:
-            Exception: If the test can't be imported or loaded.
-        """
-        test_path = f"{test_id.replace('.', '/')}.py"
-        file_path = self._download_file(test_path)
-
-        try:
-            spec = importlib.util.spec_from_file_location(test_id, file_path)
-            module = importlib.util.module_from_spec(spec)
-            spec.loader.exec_module(module)
-        except Exception as e:
-            raise GithubTestProviderLoadModuleError(
-                f"Failed to load the module from {file_path}. Error: {str(e)}"
-            )
-
-        try:
-            # find the test class that matches the last part of the test_id
-            return getattr(module, test_id.split(".")[-1])
-        except AttributeError as e:
-            raise GithubTestProviderLoadTestError(
-                f"Failed to find the test class in the module. Error: {str(e)}"
-            )
-
-
 class LocalTestProvider:
     """
     Test providers in ValidMind are responsible for loading tests from different sources,