validmind 2.5.15__py3-none-any.whl → 2.5.19__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- validmind/__version__.py +1 -1
- validmind/ai/test_descriptions.py +54 -112
- validmind/ai/test_result_description/config.yaml +29 -0
- validmind/ai/test_result_description/context.py +73 -0
- validmind/ai/test_result_description/image_processing.py +124 -0
- validmind/ai/test_result_description/system.jinja +39 -0
- validmind/ai/test_result_description/user.jinja +25 -0
- validmind/datasets/credit_risk/__init__.py +1 -0
- validmind/datasets/credit_risk/datasets/lending_club_biased.csv.gz +0 -0
- validmind/datasets/credit_risk/lending_club_bias.py +142 -0
- validmind/errors.py +17 -0
- validmind/tests/__types__.py +19 -10
- validmind/tests/{model_validation/statsmodels → data_validation}/BoxPierce.py +20 -24
- validmind/tests/data_validation/ChiSquaredFeaturesTable.py +4 -1
- validmind/tests/{model_validation/statsmodels → data_validation}/JarqueBera.py +22 -30
- validmind/tests/{model_validation/statsmodels → data_validation}/LJungBox.py +23 -27
- validmind/tests/data_validation/ProtectedClassesCombination.py +205 -0
- validmind/tests/data_validation/ProtectedClassesDescription.py +130 -0
- validmind/tests/data_validation/ProtectedClassesDisparity.py +141 -0
- validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py +180 -0
- validmind/tests/{model_validation/statsmodels → data_validation}/RunsTest.py +17 -20
- validmind/tests/{model_validation/statsmodels → data_validation}/ShapiroWilk.py +20 -22
- validmind/tests/data_validation/nlp/Hashtags.py +15 -20
- validmind/tests/data_validation/nlp/TextDescription.py +3 -1
- validmind/tests/load.py +21 -5
- validmind/tests/model_validation/ContextualRecall.py +3 -0
- validmind/tests/model_validation/ragas/AnswerCorrectness.py +12 -5
- validmind/tests/model_validation/ragas/AnswerRelevance.py +12 -6
- validmind/tests/model_validation/ragas/AnswerSimilarity.py +12 -6
- validmind/tests/model_validation/ragas/AspectCritique.py +22 -17
- validmind/tests/model_validation/ragas/ContextEntityRecall.py +12 -6
- validmind/tests/model_validation/ragas/ContextPrecision.py +12 -6
- validmind/tests/model_validation/ragas/ContextRecall.py +12 -6
- validmind/tests/model_validation/ragas/ContextUtilization.py +161 -0
- validmind/tests/model_validation/ragas/Faithfulness.py +12 -6
- validmind/tests/model_validation/ragas/NoiseSensitivity.py +158 -0
- validmind/tests/model_validation/sklearn/FeatureImportance.py +3 -3
- validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py +1 -1
- validmind/tests/model_validation/sklearn/RegressionR2Square.py +1 -2
- validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +59 -0
- validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py +40 -20
- validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py +0 -1
- validmind/tests/model_validation/statsmodels/RegressionCoeffs.py +1 -1
- validmind/utils.py +4 -0
- validmind/vm_models/test/metric.py +1 -0
- validmind/vm_models/test/result_wrapper.py +50 -26
- validmind/vm_models/test/threshold_test.py +1 -0
- {validmind-2.5.15.dist-info → validmind-2.5.19.dist-info}/METADATA +4 -3
- {validmind-2.5.15.dist-info → validmind-2.5.19.dist-info}/RECORD +52 -39
- {validmind-2.5.15.dist-info → validmind-2.5.19.dist-info}/WHEEL +1 -1
- {validmind-2.5.15.dist-info → validmind-2.5.19.dist-info}/LICENSE +0 -0
- {validmind-2.5.15.dist-info → validmind-2.5.19.dist-info}/entry_points.txt +0 -0
validmind/tests/data_validation/ProtectedClassesDescription.py
ADDED
@@ -0,0 +1,130 @@
+# Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+# See the LICENSE file in the root of this repository for details.
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+
+import pandas as pd
+import plotly.graph_objects as go
+
+from validmind import tags, tasks
+from validmind.logging import get_logger
+
+logger = get_logger(__name__)
+
+
+@tags("bias_and_fairness", "descriptive_statistics")
+@tasks("classification", "regression")
+def ProtectedClassesDescription(dataset, protected_classes=None):
+    """
+    Visualizes the distribution of protected classes in the dataset relative to the target variable
+    and provides descriptive statistics.
+
+    ### Purpose
+
+    The ProtectedClassesDescription test aims to identify potential biases or significant differences in the
+    distribution of target outcomes across different protected classes. This visualization and statistical summary
+    help in understanding the relationship between protected attributes and the target variable, which is crucial
+    for assessing fairness in machine learning models.
+
+    ### Test Mechanism
+
+    The function creates interactive stacked bar charts for each specified protected class using Plotly.
+    Additionally, it generates a single table of descriptive statistics for all protected classes, including:
+    - Protected class and category
+    - Count and percentage of each category within the protected class
+    - Mean, median, and mode of the target variable for each category
+    - Standard deviation of the target variable for each category
+    - Minimum and maximum values of the target variable for each category
+
+    ### Signs of High Risk
+
+    - Significant imbalances in the distribution of target outcomes across different categories of a protected class.
+    - Large disparities in mean, median, or mode of the target variable across categories.
+    - Underrepresentation or overrepresentation of certain groups within protected classes.
+    - High standard deviations in certain categories, indicating potential volatility or outliers.
+
+    ### Strengths
+
+    - Provides both visual and statistical representation of potential biases in the dataset.
+    - Allows for easy identification of imbalances in target variable distribution across protected classes.
+    - Interactive plots enable detailed exploration of the data.
+    - Consolidated statistical summary provides quantitative measures to complement visual analysis.
+    - Applicable to both classification and regression tasks.
+
+    ### Limitations
+
+    - Does not provide advanced statistical measures of bias or fairness.
+    - May become cluttered if there are many categories within a protected class or many unique target values.
+    - Interpretation may require domain expertise to understand the implications of observed disparities.
+    - Does not account for intersectionality or complex interactions between multiple protected attributes.
+    """
+
+    if protected_classes is None:
+        logger.warning(
+            "No protected classes provided. Please pass the 'protected_classes' parameter to run this test."
+        )
+        return pd.DataFrame()
+
+    figures = []
+    all_stats = []
+
+    df = dataset._df
+    target = dataset.target_column
+
+    for protected_class in protected_classes:
+        # Create the stacked bar chart
+        counts = df.groupby([protected_class, target]).size().unstack(fill_value=0)
+        fig = go.Figure()
+        for col in counts.columns:
+            fig.add_trace(
+                go.Bar(
+                    x=counts.index,
+                    y=counts[col],
+                    name=str(col),
+                    text=counts[col],
+                    textposition="auto",
+                )
+            )
+
+        fig.update_layout(
+            title=f"Distribution of {protected_class} by {target}",
+            xaxis_title=protected_class,
+            yaxis_title="Count",
+            barmode="stack",
+            showlegend=True,
+            legend_title=target,
+        )
+
+        figures.append(fig)
+
+        # Get unique values in the target column
+        target_labels = df[target].unique()
+
+        for category in df[protected_class].unique():
+            category_data = df[df[protected_class] == category]
+            stats = {
+                "Protected Class": protected_class,
+                "Category": category,
+                "Count": len(category_data),
+                "Percentage": len(category_data) / len(df) * 100,
+            }
+
+            # Add mean for each target label
+            for label in target_labels:
+                label_data = category_data[category_data[target] == label]
+                stats[f"Rate {target}: {label}"] = (
+                    len(label_data) / len(category_data) * 100
+                )
+
+            all_stats.append(stats)
+
+    # Create a single DataFrame with all statistics
+    stats_df = pd.DataFrame(all_stats)
+    stats_df = stats_df.round(2)  # Round to 2 decimal places for readability
+
+    # Sort the DataFrame by Protected Class and Count (descending)
+    stats_df = stats_df.sort_values(
+        ["Protected Class", "Count"], ascending=[True, False]
+    )
+
+    return (stats_df, *tuple(figures))
validmind/tests/data_validation/ProtectedClassesDisparity.py
ADDED
@@ -0,0 +1,141 @@
+# Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+# See the LICENSE file in the root of this repository for details.
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+import io
+import sys
+
+import pandas as pd
+
+from validmind import tags, tasks
+from validmind.errors import MissingDependencyError
+from validmind.logging import get_logger
+
+try:
+    import aequitas.plot as ap
+    from aequitas.bias import Bias
+    from aequitas.group import Group
+    from aequitas.plotting import Plot
+except ImportError as e:
+    raise MissingDependencyError(
+        "Missing required package `aequitas` for ProtectedClassesDisparity.",
+        required_dependencies=["aequitas"],
+    ) from e
+
+logger = get_logger(__name__)
+
+
+@tags("bias_and_fairness")
+@tasks("classification", "regression")
+def ProtectedClassesDisparity(
+    dataset,
+    model,
+    protected_classes=None,
+    disparity_tolerance=1.25,
+    metrics=["fnr", "fpr", "tpr"],
+):
+    """
+    Investigates disparities in model performance across different protected class segments.
+
+    ### Purpose
+
+    This test aims to identify and quantify potential biases in model outcomes by comparing various performance metrics
+    across different segments of protected classes. It helps in assessing whether the model produces discriminatory
+    outcomes for certain groups, which is crucial for ensuring fairness in machine learning models.
+
+    ### Test Mechanism
+
+    The test performs the following steps:
+    1. Calculates performance metrics (e.g., false negative rate, false positive rate, true positive rate) for each segment
+    of the specified protected classes.
+    2. Computes disparity ratios by comparing these metrics between different segments and a reference group.
+    3. Generates visualizations showing the disparities and their relation to a user-defined disparity tolerance threshold.
+    4. Produces a comprehensive table with various disparity metrics for detailed analysis.
+
+    ### Signs of High Risk
+
+    - Disparity ratios exceeding the specified disparity tolerance threshold.
+    - Consistent patterns of higher error rates or lower performance for specific protected class segments.
+    - Statistically significant differences in performance metrics across segments.
+
+    ### Strengths
+
+    - Provides a comprehensive view of model fairness across multiple protected attributes and metrics.
+    - Allows for easy identification of problematic disparities through visual and tabular representations.
+    - Customizable disparity tolerance threshold to align with specific use-case requirements.
+    - Applicable to various performance metrics, offering a multi-faceted analysis of model fairness.
+
+    ### Limitations
+
+    - Relies on a predefined reference group for each protected class, which may not always be the most appropriate choice.
+    - Does not account for intersectionality between different protected attributes.
+    - The interpretation of results may require domain expertise to understand the implications of observed disparities.
+    """
+
+    if protected_classes is None:
+        logger.warning(
+            "No protected classes provided. Please pass the 'protected_classes' parameter to run this test."
+        )
+        return pd.DataFrame()
+
+    if sys.version_info < (3, 9):
+        raise RuntimeError("This test requires Python 3.9 or higher.")
+
+    df = dataset._df
+
+    for protected_class in protected_classes:
+        # make the dataset compatible for the python package of interest
+        df[protected_class] = pd.Categorical(df[protected_class]).astype("object")
+
+    df["score"] = dataset.y_pred(model).astype(int)
+    df["label_value"] = df[dataset.target_column].astype(int)
+
+    # let map the attributes for each protected class
+    # default use reference that is most observable for dictionary
+    attributes_and_reference_groups = {}
+    for protected_class in protected_classes:
+        attributes_and_reference_groups.update(
+            {protected_class: df[protected_class].value_counts().idxmax()}
+        )
+
+    attributes_to_audit = list(attributes_and_reference_groups.keys())
+
+    # Initialize Aequitas
+    g = Group()
+    b = Bias()
+    aqp = Plot()
+
+    columns_to_include = (
+        protected_classes + [dataset.target_column] + ["score", "label_value"]
+    )
+
+    # get_crosstabs returns a dataframe of the group counts and group value bias metrics.
+    xtab, _ = g.get_crosstabs(df[columns_to_include], attr_cols=attributes_to_audit)
+    bdf = b.get_disparity_predefined_groups(
+        xtab,
+        original_df=df[columns_to_include],
+        ref_groups_dict=attributes_and_reference_groups,
+        alpha=0.05,
+        mask_significance=True,
+    )
+
+    plots = []
+    for protected_class in protected_classes:
+        plot = ap.disparity(
+            bdf, metrics, protected_class, fairness_threshold=disparity_tolerance
+        )
+
+        buf = io.BytesIO()  # create a bytes array to save the image into in memory
+        plot.save(
+            buf, format="png"
+        )  # as long as the above library is installed, this will work
+        plots.append(buf.getvalue())
+
+    string = "_disparity"
+    metrics_adj = [x + string for x in metrics]
+
+    table = bdf[["attribute_name", "attribute_value"] + b.list_disparities(bdf)]
+    plots.append(aqp.plot_disparity_all(bdf, metrics=metrics_adj))
+    plots_return = tuple(plots)

+    return (table, *plots_return)
validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py
ADDED
@@ -0,0 +1,180 @@
+# Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+# See the LICENSE file in the root of this repository for details.
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+import json
+import sys
+
+import matplotlib.pyplot as plt
+import pandas as pd
+
+from validmind import tags, tasks
+from validmind.errors import MissingDependencyError
+from validmind.logging import get_logger
+
+try:
+    from fairlearn.metrics import (
+        MetricFrame,
+        count,
+        demographic_parity_ratio,
+        equalized_odds_ratio,
+        false_negative_rate,
+        false_positive_rate,
+        true_positive_rate,
+    )
+    from fairlearn.postprocessing import ThresholdOptimizer, plot_threshold_optimizer
+except ImportError as e:
+    raise MissingDependencyError(
+        "Missing required package `fairlearn` for ProtectedClassesThresholdOptimizer.",
+        required_dependencies=["fairlearn"],
+    ) from e
+
+logger = get_logger(__name__)
+
+
+@tags("bias_and_fairness")
+@tasks("classification", "regression")
+def ProtectedClassesThresholdOptimizer(
+    dataset, pipeline=None, protected_classes=None, X_train=None, y_train=None
+):
+    """
+    Obtains a classifier by applying group-specific thresholds to the provided estimator.
+
+    ### Purpose
+
+    This test aims to optimize the fairness of a machine learning model by applying different
+    classification thresholds for different protected groups. It helps in mitigating bias and
+    achieving more equitable outcomes across different demographic groups.
+
+    ### Test Mechanism
+
+    The test uses Fairlearn's ThresholdOptimizer to:
+    1. Fit an optimizer on the training data, considering protected classes.
+    2. Apply optimized thresholds to make predictions on the test data.
+    3. Calculate and report various fairness metrics.
+    4. Visualize the optimized thresholds.
+
+    ### Signs of High Risk
+
+    - Large disparities in fairness metrics (e.g., Demographic Parity Ratio, Equalized Odds Ratio)
+    across different protected groups.
+    - Significant differences in False Positive Rates (FPR) or True Positive Rates (TPR) between groups.
+    - Thresholds that vary widely across different protected groups.
+
+    ### Strengths
+
+    - Provides a post-processing method to improve model fairness without modifying the original model.
+    - Allows for balancing multiple fairness criteria simultaneously.
+    - Offers visual insights into the threshold optimization process.
+
+    ### Limitations
+
+    - May lead to a decrease in overall model performance while improving fairness.
+    - Requires access to protected attribute information at prediction time.
+    - The effectiveness can vary depending on the chosen fairness constraint and objective.
+    """
+
+    if sys.version_info < (3, 9):
+        raise RuntimeError("This test requires Python 3.9 or higher.")
+
+    if (
+        pipeline is None
+        or protected_classes is None
+        or X_train is None
+        or y_train is None
+    ):
+        logger.warning(
+            "Missing required parameters. Please provide pipeline, protected_classes, X_train, and y_train."
+        )
+        return pd.DataFrame()
+
+    test_df = dataset.df
+
+    threshold_optimizer = initialize_and_fit_optimizer(
+        pipeline, X_train, y_train, X_train[protected_classes]
+    )
+
+    fig = plot_thresholds(threshold_optimizer)
+
+    target = dataset.target_column
+    y_pred_opt = make_predictions(threshold_optimizer, test_df, protected_classes)
+
+    fairness_metrics = calculate_fairness_metrics(
+        test_df, target, y_pred_opt, protected_classes
+    )
+
+    return (
+        {"DPR and EOR Table": fairness_metrics.reset_index()},
+        fig,
+    )
+
+
+def initialize_and_fit_optimizer(pipeline, X_train, y_train, protected_classes_df):
+    threshold_optimizer = ThresholdOptimizer(
+        estimator=pipeline,
+        objective="balanced_accuracy_score",
+        constraints="demographic_parity",
+        predict_method="predict_proba",
+        prefit=False,
+    )
+    threshold_optimizer.fit(X_train, y_train, sensitive_features=protected_classes_df)
+    return threshold_optimizer
+
+
+def plot_thresholds(threshold_optimizer):
+    fig = plt.figure()
+    plot_threshold_optimizer(threshold_optimizer, show_plot=False)
+    return fig
+
+
+def make_predictions(threshold_optimizer, test_df, protected_classes):
+    y_pred_opt = threshold_optimizer.predict(
+        test_df, sensitive_features=test_df[protected_classes]
+    )
+    return y_pred_opt
+
+
+def calculate_fairness_metrics(test_df, target, y_pred_opt, protected_classes):
+    fairness_metrics = pd.DataFrame(
+        columns=protected_classes,
+        index=["demographic parity ratio", "equal odds ratio"],
+    )
+
+    for feature in protected_classes:
+        dpr = demographic_parity_ratio(
+            y_true=test_df[target],
+            y_pred=y_pred_opt,
+            sensitive_features=test_df[[feature]],
+        )
+        eor = equalized_odds_ratio(
+            y_true=test_df[target],
+            y_pred=y_pred_opt,
+            sensitive_features=test_df[[feature]],
+        )
+        fairness_metrics[feature] = [round(dpr, 2), round(eor, 2)]
+
+    return fairness_metrics
+
+
+def calculate_group_metrics(test_df, target, y_pred_opt, protected_classes):
+    metrics = {
+        "fpr": false_positive_rate,
+        "tpr": true_positive_rate,
+        "fnr": false_negative_rate,
+        "count": count,
+    }
+    mf = MetricFrame(
+        metrics=metrics,
+        y_true=test_df[target],
+        y_pred=y_pred_opt,
+        sensitive_features=test_df[protected_classes],
+    )
+    group_metrics = mf.by_group
+    return group_metrics
+
+
+def get_thresholds_by_group(threshold_optimizer):
+    threshold_rules = threshold_optimizer.interpolated_thresholder_.interpolation_dict
+    thresholds = json.dumps(threshold_rules, default=str, indent=4)
+    thresholds_df = pd.DataFrame.from_records(json.loads(thresholds))
+    return thresholds_df
validmind/tests/{model_validation/statsmodels → data_validation}/RunsTest.py
RENAMED
@@ -2,12 +2,15 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
 
+import pandas as pd
 from statsmodels.sandbox.stats.runs import runstest_1samp
 
-from validmind
+from validmind import tags, tasks
 
 
-class RunsTest(Metric):
+@tasks("classification", "regression")
+@tags("tabular_data", "statistical_test", "statsmodels")
+def RunsTest(dataset):
     """
     Executes Runs Test on ML model to detect non-random patterns in output data sequence.
 
@@ -52,24 +55,18 @@ class RunsTest(Metric):
     - Does not provide model performance evaluation; it is used to detect patterns in the sequence of outputs only.
     """
 
-
-    required_inputs = ["dataset"]
-    tasks = ["classification", "regression"]
-    tags = ["tabular_data", "statistical_test", "statsmodels"]
+    df = dataset.df[dataset.feature_columns_numeric]
 
-
-
-
-
-
+    runs_test_values = {}
+    for col in df.columns:
+        runs_stat, runs_p_value = runstest_1samp(df[col].values)
+        runs_test_values[col] = {
+            "stat": runs_stat,
+            "pvalue": runs_p_value,
+        }
 
-
-
-
+    runs_test_df = pd.DataFrame.from_dict(runs_test_values, orient="index")
+    runs_test_df.reset_index(inplace=True)
+    runs_test_df.columns = ["feature", "stat", "pvalue"]
 
-
-                "stat": runs_stat,
-                "pvalue": runs_p_value,
-            }
-
-        return self.cache_results(runs_test_values)
+    return runs_test_df
validmind/tests/{model_validation/statsmodels → data_validation}/ShapiroWilk.py
RENAMED
@@ -2,12 +2,15 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
 
+import pandas as pd
 from scipy import stats
 
-from validmind
+from validmind import tags, tasks
 
 
-class ShapiroWilk(Metric):
+@tasks("classification", "regression")
+@tags("tabular_data", "data_distribution", "statistical_test")
+def ShapiroWilk(dataset):
     """
     Evaluates feature-wise normality of training data using the Shapiro-Wilk test.
 
@@ -49,23 +52,18 @@ class ShapiroWilk(Metric):
     - Lastly, the Shapiro-Wilk test is not optimally suited for processing data with pronounced skewness or kurtosis.
     """
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                "stat": sw_stat,
-                "pvalue": sw_pvalue,
-            }
-
-        return self.cache_results(sw_values)
+    df = dataset.df[dataset.feature_columns_numeric]
+
+    sw_values = {}
+    for col in df.columns:
+        sw_stat, sw_pvalue = stats.shapiro(df[col].values)
+        sw_values[col] = {
+            "stat": sw_stat,
+            "pvalue": sw_pvalue,
+        }
+
+    sw_df = pd.DataFrame.from_dict(sw_values, orient="index")
+    sw_df.reset_index(inplace=True)
+    sw_df.columns = ["column", "stat", "pvalue"]
+
+    return sw_df
validmind/tests/data_validation/nlp/Hashtags.py
CHANGED
@@ -9,8 +9,7 @@ Threshold based tests
 import re
 from dataclasses import dataclass
 
-import
-import seaborn as sns
+import plotly.graph_objects as go
 
 from validmind.vm_models import Figure, ThresholdTest, VMDataset
 
@@ -74,25 +73,23 @@ class Hashtags(ThresholdTest):
         text_column = self.inputs.dataset.text_column
 
         def find_hash(text):
-
-
-
-
-
-
-
-        )
-        temp = (
-            temp.to_frame()
-            .reset_index()
-            .rename(columns={"index": "Hashtag", text_column: "count"})
-        )
+            return re.findall(r"(?<=#)\w+", str(text))
+
+        # Extract hashtags from the text column and count occurrences
+        hashtags = self.inputs.dataset.df[text_column].apply(find_hash).explode()
+        temp = hashtags.value_counts().head(self.params["top_hashtags"])
+
+        print(f"temp: {temp}")
 
         figures = []
         if not temp.empty:
-            fig =
-
-
+            fig = go.Figure(data=[go.Bar(x=temp.index, y=temp.values)])
+            fig.update_layout(
+                title="Top Hashtags",
+                xaxis_title="Hashtag",
+                yaxis_title="Count",
+                xaxis_tickangle=-45,
+            )
             figures.append(
                 Figure(
                     for_object=self,
@@ -100,7 +97,5 @@ class Hashtags(ThresholdTest):
                     figure=fig,
                 )
             )
-        # Do this if you want to prevent the figure from being displayed
-        plt.close("all")
 
         return self.cache_results([], passed=True, figures=figures)
validmind/tests/data_validation/nlp/TextDescription.py
CHANGED
@@ -84,7 +84,6 @@ class TextDescription(Metric):
     tags = ["nlp", "text_data", "visualization"]
 
     def general_text_metrics(self, df, text_column):
-        nltk.download("punkt", quiet=True)
         results = []
 
         for text in df[text_column]:
@@ -175,6 +174,9 @@ class TextDescription(Metric):
         if not isinstance(self.inputs.dataset, VMDataset):
             raise ValueError("TextDescription requires a validmind Dataset object")
 
+        # download nltk data
+        nltk.download("punkt_tab", quiet=True)
+
         df_text_description = self.text_description_table(
             self.inputs.dataset.df, self.params
         )
validmind/tests/load.py
CHANGED
@@ -15,7 +15,7 @@ from uuid import uuid4
 import pandas as pd
 from ipywidgets import HTML, Accordion
 
-from ..errors import LoadTestError
+from ..errors import LoadTestError, MissingDependencyError
 from ..html_templates.content_blocks import test_content_block_html
 from ..logging import get_logger
 from ..unit_metrics.composite import load_composite_metric
@@ -88,10 +88,26 @@ def list_tests(
     Returns:
         list or pandas.DataFrame: A list of all tests or a formatted table.
     """
-    tests = {
-
-
-
+    tests = {}
+    for test_id in test_store.get_test_ids():
+        try:
+            tests[test_id] = load_test(test_id, reload=True)
+        except MissingDependencyError as e:
+            # skip tests that have missing dependencies
+            logger.debug(str(e))
+
+            if e.extra:
+                logger.info(
+                    f"Skipping `{test_id}` as it requires extra dependencies: {e.required_dependencies}."
+                    f" Please run `pip install validmind[{e.extra}]` to view and run this test."
+                )
+            else:
+                logger.info(
+                    f"Skipping `{test_id}` as it requires missing dependencies: {e.required_dependencies}."
+                    " Please install the missing dependencies to view and run this test."
+                )
+
+            continue
 
     # first search by the filter string since it's the most general search
     if filter is not None: