explainiverse-0.1.0a1-py3-none-any.whl → explainiverse-0.1.1a1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- explainiverse/engine/__init__.py +0 -0
- explainiverse/engine/suite.py +143 -0
- explainiverse/evaluation/__init__.py +0 -0
- explainiverse/evaluation/metrics.py +233 -0
- explainiverse-0.1.1a1.dist-info/METADATA +128 -0
- {explainiverse-0.1.0a1.dist-info → explainiverse-0.1.1a1.dist-info}/RECORD +8 -4
- explainiverse-0.1.0a1.dist-info/METADATA +0 -83
- {explainiverse-0.1.0a1.dist-info → explainiverse-0.1.1a1.dist-info}/LICENSE +0 -0
- {explainiverse-0.1.0a1.dist-info → explainiverse-0.1.1a1.dist-info}/WHEEL +0 -0

explainiverse/engine/__init__.py
File without changes

explainiverse/engine/suite.py
@@ -0,0 +1,143 @@
+# src/explainiverse/engine/suite.py
+
+from explainiverse.core.explanation import Explanation
+from explainiverse.explainers.attribution.lime_wrapper import LimeExplainer
+from explainiverse.explainers.attribution.shap_wrapper import ShapExplainer
+from explainiverse.evaluation.metrics import compute_roar
+from sklearn.metrics import accuracy_score
+from sklearn.linear_model import LogisticRegression
+
+class ExplanationSuite:
+    """
+    Runs multiple explainers on a single instance and compares their outputs.
+    """
+
+    def __init__(self, model, explainer_configs, data_meta=None):
+        """
+        Args:
+            model: a model adapter (e.g., SklearnAdapter)
+            explainer_configs: list of (name, kwargs) tuples for explainers
+            data_meta: optional metadata about the task, scope, or preference
+        """
+        self.model = model
+        self.configs = explainer_configs
+        self.data_meta = data_meta or {}
+        self.explanations = {}
+
+    def run(self, instance):
+        """
+        Run all configured explainers on a single instance.
+        """
+        for name, params in self.configs:
+            explainer = self._load_explainer(name, **params)
+            explanation = explainer.explain(instance)
+            self.explanations[name] = explanation
+        return self.explanations
+
+    def compare(self):
+        """
+        Print attribution scores side-by-side.
+        """
+        keys = set()
+        for explanation in self.explanations.values():
+            keys.update(explanation.explanation_data.get("feature_attributions", {}).keys())
+
+        print("\nSide-by-Side Comparison:")
+        for key in sorted(keys):
+            row = [f"{key}"]
+            for name in self.explanations:
+                value = self.explanations[name].explanation_data.get("feature_attributions", {}).get(key, "—")
+                row.append(f"{name}: {value:.4f}" if isinstance(value, float) else f"{name}: {value}")
+            print(" | ".join(row))
+
+    def suggest_best(self):
+        """
+        Suggest the best explainer based on model type, output structure, and task metadata.
+        """
+        if "task" in self.data_meta:
+            task = self.data_meta["task"]
+        else:
+            task = "unknown"
+
+        model = self.model.model
+
+        # 1. Regression: SHAP preferred due to consistent output
+        if task == "regression":
+            return "shap"
+
+        # 2. Model with `predict_proba` → SHAP handles probabilistic outputs well
+        if hasattr(model, "predict_proba"):
+            try:
+                output = self.model.predict([[0] * model.n_features_in_])
+                if output.shape[1] > 2:
+                    return "shap"  # Multi-class, SHAP more stable
+                else:
+                    return "lime"  # Binary, both are okay
+            except Exception:
+                return "shap"
+
+        # 3. Tree-based models → prefer SHAP (TreeSHAP if available)
+        if "tree" in str(type(model)).lower():
+            return "shap"
+
+        # 4. Default fallback
+        return "lime"
+
+    def _load_explainer(self, name, **kwargs):
+        if name == "lime":
+            return LimeExplainer(model=self.model, **kwargs)
+        elif name == "shap":
+            return ShapExplainer(model=self.model, **kwargs)
+        else:
+            raise ValueError(f"Unknown explainer: {name}")
+
+    def evaluate_roar(
+        self,
+        X_train,
+        y_train,
+        X_test,
+        y_test,
+        top_k: int = 2,
+        model_class=None,
+        model_kwargs: dict = None
+    ):
+        """
+        Evaluate each explainer using ROAR (Remove And Retrain).
+
+        Args:
+            X_train, y_train: training data
+            X_test, y_test: test data
+            top_k: number of features to mask
+            model_class: model constructor with .fit() and .predict() (default: same as current model)
+            model_kwargs: optional keyword args for new model instance
+
+        Returns:
+            Dict of {explainer_name: accuracy drop (baseline - retrained)}
+        """
+        model_kwargs = model_kwargs or {}
+
+        # Default to type(self.model.model) if not provided
+        if model_class is None:
+            model_class = type(self.model.model)
+
+        roar_scores = {}
+
+        for name, explanation in self.explanations.items():
+            print(f"[ROAR] Evaluating explainer: {name}")
+            roar = compute_roar(
+                model_class=model_class,
+                X_train=X_train,
+                y_train=y_train,
+                X_test=X_test,
+                y_test=y_test,
+                explanations=[explanation],  # single-instance for now
+                top_k=top_k,
+                model_kwargs=model_kwargs
+            )
+            roar_scores[name] = roar
+
+        return roar_scores
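
For orientation, a minimal sketch of how the suite added above might be driven end to end. It is illustrative only: the explainer kwargs are left empty, although the real `LimeExplainer`/`ShapExplainer` wrappers may require further arguments (e.g., training data or feature names), and the `SklearnAdapter` signature is assumed from the Quick Example in the METADATA below.

```python
# Minimal sketch, not the package's documented API; the explainer kwargs and
# the SklearnAdapter signature are assumptions taken from the Quick Example.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

from explainiverse.adapters.sklearn_adapter import SklearnAdapter
from explainiverse.engine.suite import ExplanationSuite

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model = LogisticRegression(max_iter=1000).fit(X_train, y_train)

adapter = SklearnAdapter(model, class_names=["setosa", "versicolor", "virginica"])
suite = ExplanationSuite(
    model=adapter,
    explainer_configs=[("lime", {}), ("shap", {})],  # kwargs elided; real wrappers may need more
    data_meta={"task": "classification"},
)

suite.run(X_test[0])         # one instance, every configured explainer
suite.compare()              # side-by-side attribution table
print(suite.suggest_best())  # heuristic pick: "shap" or "lime"

# {explainer_name: accuracy drop after masking top_k features and retraining}
print(suite.evaluate_roar(X_train, y_train, X_test, y_test, top_k=2))
```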

explainiverse/evaluation/__init__.py
File without changes

explainiverse/evaluation/metrics.py
@@ -0,0 +1,233 @@
+import numpy as np
+from explainiverse.core.explanation import Explanation
+from sklearn.metrics import accuracy_score
+import copy
+
+
+def compute_aopc(
+    model,
+    instance: np.ndarray,
+    explanation: Explanation,
+    num_steps: int = 10,
+    baseline_value: float = 0.0
+) -> float:
+    """
+    Computes Area Over the Perturbation Curve (AOPC) by iteratively removing top features.
+
+    Args:
+        model: wrapped model with .predict() method
+        instance: input sample (1D array)
+        explanation: Explanation object
+        num_steps: number of top features to remove
+        baseline_value: value to replace removed features with (e.g., 0, mean)
+
+    Returns:
+        AOPC score (higher means explanation is more faithful)
+    """
+    base_pred = model.predict(instance.reshape(1, -1))[0]
+    attributions = explanation.explanation_data.get("feature_attributions", {})
+
+    if not attributions:
+        raise ValueError("No feature attributions found in explanation.")
+
+    # Sort features by abs importance
+    sorted_features = sorted(
+        attributions.items(),
+        key=lambda x: abs(x[1]),
+        reverse=True
+    )
+
+    # Try to map feature names to indices
+    feature_indices = []
+    for i, (fname, _) in enumerate(sorted_features):
+        try:
+            idx = explanation.feature_names.index(fname)
+        except Exception:
+            idx = i  # fallback: assume order
+        feature_indices.append(idx)
+
+    deltas = []
+    modified = instance.copy()
+
+    for i in range(min(num_steps, len(feature_indices))):
+        idx = feature_indices[i]
+        modified[idx] = baseline_value
+        new_pred = model.predict(modified.reshape(1, -1))[0]
+        delta = abs(base_pred - new_pred)
+        deltas.append(delta)
+
+    return np.mean(deltas)
+
+
+def compute_batch_aopc(
+    model,
+    X: np.ndarray,
+    explanations: dict,
+    num_steps: int = 10,
+    baseline_value: float = 0.0
+) -> dict:
+    """
+    Compute average AOPC for multiple explainers over a batch of instances.
+
+    Args:
+        model: wrapped model
+        X: 2D input array
+        explanations: dict of {explainer_name: list of Explanation objects}
+        num_steps: number of top features to remove
+        baseline_value: value to replace features with
+
+    Returns:
+        Dict of {explainer_name: mean AOPC score}
+    """
+    results = {}
+
+    for explainer_name, expl_list in explanations.items():
+        scores = []
+        for i, exp in enumerate(expl_list):
+            instance = X[i]
+            score = compute_aopc(model, instance, exp, num_steps, baseline_value)
+            scores.append(score)
+        results[explainer_name] = np.mean(scores)
+
+    return results
+
+
+def compute_roar(
+    model_class,
+    X_train: np.ndarray,
+    y_train: np.ndarray,
+    X_test: np.ndarray,
+    y_test: np.ndarray,
+    explanations: list,
+    top_k: int = 3,
+    baseline_value: float = 0.0,
+    model_kwargs: dict = None
+) -> float:
+    """
+    Compute ROAR (Remove And Retrain) using top-k important features from explanations.
+
+    Args:
+        model_class: uninstantiated model class (e.g. LogisticRegression)
+        X_train: full training data
+        y_train: training labels
+        X_test: test features
+        y_test: test labels
+        explanations: list of Explanation objects (one per train instance)
+        top_k: number of top features to remove
+        baseline_value: what to set removed features to
+        model_kwargs: optional kwargs to pass to model_class
+
+    Returns:
+        Accuracy drop (baseline_acc - retrained_acc)
+    """
+    model_kwargs = model_kwargs or {}
+
+    # Baseline model
+    baseline_model = model_class(**model_kwargs)
+    baseline_model.fit(X_train, y_train)
+    baseline_preds = baseline_model.predict(X_test)
+    baseline_acc = accuracy_score(y_test, baseline_preds)
+
+    # Compute top-k feature indices from attributions (use mode)
+    feature_counts = {}
+    for exp in explanations:
+        for fname, val in sorted(exp.explanation_data["feature_attributions"].items(), key=lambda x: abs(x[1]), reverse=True)[:top_k]:
+            try:
+                idx = exp.feature_names.index(fname)
+                feature_counts[idx] = feature_counts.get(idx, 0) + 1
+            except Exception:
+                continue
+
+    top_features = sorted(feature_counts.items(), key=lambda x: x[1], reverse=True)[:top_k]
+    top_feature_indices = [idx for idx, _ in top_features]
+
+    # Remove top-k from training and test data
+    X_train_mod = copy.deepcopy(X_train)
+    X_test_mod = copy.deepcopy(X_test)
+
+    # Compute or assign feature-wise baseline values
+    if not isinstance(
+        baseline_value,
+        (str, float, int, np.number, np.ndarray)
+    ) and not callable(baseline_value):
+        raise ValueError(f"Invalid baseline_value type: {type(baseline_value)}")
+
+    if isinstance(baseline_value, str):
+        if baseline_value == "mean":
+            feature_baseline = np.mean(X_train, axis=0)
+        elif baseline_value == "median":
+            feature_baseline = np.median(X_train, axis=0)
+        else:
+            raise ValueError(f"Unsupported string baseline: {baseline_value}")
+    elif callable(baseline_value):
+        feature_baseline = baseline_value(X_train)
+    elif isinstance(baseline_value, np.ndarray):
+        if baseline_value.shape != (X_train.shape[1],):
+            raise ValueError("baseline_value ndarray must match number of features")
+        feature_baseline = baseline_value
+    elif isinstance(baseline_value, (float, int, np.number)):
+        feature_baseline = np.full(X_train.shape[1], baseline_value)
+    else:
+        raise ValueError(f"Invalid baseline_value type: {type(baseline_value)}")
+
+    for idx in top_feature_indices:
+        X_train_mod[:, idx] = feature_baseline[idx]
+        X_test_mod[:, idx] = feature_baseline[idx]
+
+    # Retrain and evaluate
+    retrained_model = model_class(**model_kwargs)
+    retrained_model.fit(X_train_mod, y_train)
+    retrained_preds = retrained_model.predict(X_test_mod)
+    retrained_acc = accuracy_score(y_test, retrained_preds)
+
+    return baseline_acc - retrained_acc
+
+
+def compute_roar_curve(
+    model_class,
+    X_train,
+    y_train,
+    X_test,
+    y_test,
+    explanations,
+    max_k=5,
+    baseline_value="mean",
+    model_kwargs=None
+) -> dict:
+    """
+    Compute ROAR accuracy drops across a range of top-k features removed.
+
+    Args:
+        model_class: model type (e.g. LogisticRegression)
+        X_train, y_train, X_test, y_test: full dataset
+        explanations: list of Explanation objects
+        max_k: maximum top-k to try
+        baseline_value: string, scalar, ndarray, or callable
+        model_kwargs: passed to model class
+
+    Returns:
+        Dict of {k: accuracy drop} for k in 1..max_k
+    """
+    from copy import deepcopy
+
+    model_kwargs = model_kwargs or {}
+    curve = {}
+
+    for k in range(1, max_k + 1):
+        acc_drop = compute_roar(
+            model_class=model_class,
+            X_train=deepcopy(X_train),
+            y_train=deepcopy(y_train),
+            X_test=deepcopy(X_test),
+            y_test=deepcopy(y_test),
+            explanations=deepcopy(explanations),
+            top_k=k,
+            baseline_value=baseline_value,
+            model_kwargs=deepcopy(model_kwargs)
+        )
+        curve[k] = acc_drop
+
+    return curve
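
To make the metric entry points concrete, a small self-contained sketch follows. It uses a stand-in explanation object (a `SimpleNamespace` carrying the two attributes `compute_roar` reads: `feature_names` and `explanation_data["feature_attributions"]`); in real use these would be `Explanation` objects produced by LIME or SHAP, and the attribution values here are fabricated purely to make the example runnable.

```python
# Sketch only: exercises compute_roar / compute_roar_curve with a stand-in
# explanation object; the attribution values are fabricated for illustration.
from types import SimpleNamespace

from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

from explainiverse.evaluation.metrics import compute_roar, compute_roar_curve

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
names = [f"f{i}" for i in range(X.shape[1])]

# Stand-in for an Explanation: importance grows with feature index (arbitrary).
fake = SimpleNamespace(
    feature_names=names,
    explanation_data={"feature_attributions": {n: float(i) for i, n in enumerate(names)}},
)

# Single accuracy drop at top_k=3, masking with per-feature training means
drop = compute_roar(
    model_class=LogisticRegression,
    X_train=X_train, y_train=y_train,
    X_test=X_test, y_test=y_test,
    explanations=[fake],
    top_k=3,
    baseline_value="mean",
    model_kwargs={"max_iter": 5000},
)
print(f"accuracy drop at k=3: {drop:.4f}")

# Full curve: {1: drop_1, 2: drop_2, ..., 5: drop_5}
curve = compute_roar_curve(
    model_class=LogisticRegression,
    X_train=X_train, y_train=y_train,
    X_test=X_test, y_test=y_test,
    explanations=[fake],
    max_k=5,
    baseline_value="mean",
    model_kwargs={"max_iter": 5000},
)
print(curve)
```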

explainiverse-0.1.1a1.dist-info/METADATA
@@ -0,0 +1,128 @@
+Metadata-Version: 2.1
+Name: explainiverse
+Version: 0.1.1a1
+Summary: Unified, extensible explainability framework supporting LIME, SHAP, and custom adapters
+Home-page: https://github.com/jemsbhai/explainiverse
+License: MIT
+Author: Muntaser Syed
+Author-email: jemsbhai@gmail.com
+Requires-Python: >=3.10,<3.13
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
+Requires-Dist: numpy (==1.24.4)
+Requires-Dist: scikit-learn (>=1.1,<1.4)
+Requires-Dist: shap (>=0.48.0,<0.49.0)
+Requires-Dist: xgboost (>=3.0.2,<4.0.0)
+Project-URL: Repository, https://github.com/jemsbhai/explainiverse
+Description-Content-Type: text/markdown
+
+# Explainiverse
+
+**Explainiverse** is a unified, extensible, and testable Python framework for Explainable AI (XAI).
+It offers a standardized interface for model-agnostic explainability, evaluation metrics like AOPC and ROAR, and support for multiple XAI methods out of the box.
+
+---
+
+## Features
+
+- Standardized `Explainer` API (`BaseExplainer`)
+- Support for:
+  - Local and global feature attribution
+  - Regression and classification tasks
+- Integrated explainers:
+  - **LIME** (tabular, local surrogate)
+  - **SHAP** (KernelExplainer with multi-class, regression, cohort support)
+- Evaluation metrics:
+  - **AOPC** (Area Over the Perturbation Curve)
+  - **ROAR** (Remove And Retrain)
+    - Multiple `top_k` support
+    - Baseline options: `"mean"`, `"median"`, `np.ndarray`, `callable`
+    - ROAR curve generation across a range of top-k features
+- Explainability Suite:
+  - Run and compare multiple explainers
+  - Auto-suggestion based on model/task type
+- Built-in support for models: `LogisticRegression`, `RandomForest`, `SVC`, `KNN`, `XGB`, `NaiveBayes`, and more
+
+---
+
+## Installation
+
+From PyPI:
+
+```bash
+pip install explainiverse
+```
+
+For development use:
+
+```bash
+git clone https://github.com/jemsbhai/explainiverse.git
+cd explainiverse
+poetry install
+```
+
+---
+
+## Quick Example
+
+```python
+from explainiverse.adapters.sklearn_adapter import SklearnAdapter
+from explainiverse.explainers.attribution.lime_wrapper import LimeExplainer
+from explainiverse.engine.suite import ExplanationSuite
+
+# Wrap your model
+adapter = SklearnAdapter(your_model, class_names=["yes", "no"])
+
+# Build the suite
+suite = ExplanationSuite(
+    model=adapter,
+    explainer_configs=[
+        ("lime", {...}),
+        ("shap", {...})
+    ],
+    data_meta={"task": "classification"}
+)
+
+results = suite.run(instance)
+suite.compare()
+suite.evaluate_roar(X_train, y_train, X_test, y_test, top_k=3)
+```
+
+---
+
+## Running Tests
+
+All tests can be run using:
+
+```bash
+poetry run python tests/test_all.py
+```
+
+For individual component testing:
+
+```bash
+poetry run python tests/test_shap_explainer.py
+poetry run python tests/test_lime_explainer.py
+poetry run python tests/test_evaluation_metrics.py
+```
+
+---
+
+## Documentation
+
+Documentation is currently in development.
+Until then, test files (especially `test_shap_explainer.py`) demonstrate usage and structure.
+
+---
+
+## License
+
+This project is licensed under the MIT License.
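
The ROAR baseline options advertised in the feature list above accept four interchangeable forms. A short sketch, using a stand-in `X_train` (the real one would be your training matrix):

```python
import numpy as np

rng = np.random.default_rng(0)
X_train = rng.normal(size=(100, 4))  # stand-in training matrix

# Any of these can be passed as baseline_value= to compute_roar / compute_roar_curve:
baseline_options = [
    "mean",                                  # per-feature training mean
    "median",                                # per-feature training median
    np.zeros(X_train.shape[1]),              # explicit per-feature vector
    lambda X: np.percentile(X, 10, axis=0),  # callable applied to X_train
]
```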
{explainiverse-0.1.0a1.dist-info → explainiverse-0.1.1a1.dist-info}/RECORD
@@ -5,11 +5,15 @@ explainiverse/adapters/sklearn_adapter.py,sha256=WQC-i4OIaR0M-AILXV5OvNo9H2JA0dV
 explainiverse/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 explainiverse/core/explainer.py,sha256=8vI_Paj1cpbhLK6GP1Ckq2SzW4FyjUgxOpuLKE12ddI,815
 explainiverse/core/explanation.py,sha256=Gxz-0lU4YVZfV1Bsrzma7uvfze127aCmqDaVK1dg-hI,905
+explainiverse/engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+explainiverse/engine/suite.py,sha256=brBQNCHMBobaOjSVpoBBsbo3gep9qbEcT0NSdU6SPxw,5056
+explainiverse/evaluation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+explainiverse/evaluation/metrics.py,sha256=N_EucJ8Ud1Wa3IU6ZxzKKILF7UkQDLKEyhAvUaDHIGM,7685
 explainiverse/explainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 explainiverse/explainers/attribution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 explainiverse/explainers/attribution/lime_wrapper.py,sha256=hFd6tQ7uE6R3ORHTuT-w2vW9JkvFuYbEZDpGqRLRNto,2107
 explainiverse/explainers/attribution/shap_wrapper.py,sha256=yAQDZNPFbn_H0AMFhWKMEoQb8IG05Tg6oRBcWRw4CP8,2378
-explainiverse-0.1.
-explainiverse-0.1.
-explainiverse-0.1.
-explainiverse-0.1.
+explainiverse-0.1.1a1.dist-info/LICENSE,sha256=28rbHe8rJgmUlRdxJACfq1Sj-MtCEhyHxkJedQd1ZYA,1070
+explainiverse-0.1.1a1.dist-info/METADATA,sha256=rkIf7lkW6PaxJItQMdAamsOb4_DpBeVRxV0M16PrfR4,3263
+explainiverse-0.1.1a1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+explainiverse-0.1.1a1.dist-info/RECORD,,
explainiverse-0.1.0a1.dist-info/METADATA
@@ -1,83 +0,0 @@
-Metadata-Version: 2.1
-Name: explainiverse
-Version: 0.1.0a1
-Summary: Unified, extensible explainability framework supporting LIME, SHAP, and custom adapters
-Home-page: https://github.com/jemsbhai/explainiverse
-License: MIT
-Author: Muntaser Syed
-Author-email: jemsbhai@gmail.com
-Requires-Python: >=3.10,<3.13
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
-Requires-Dist: numpy (==1.24.4)
-Requires-Dist: scikit-learn (>=1.1,<1.4)
-Requires-Dist: shap (>=0.48.0,<0.49.0)
-Project-URL: Repository, https://github.com/jemsbhai/explainiverse
-Description-Content-Type: text/markdown
-
-# Explainiverse
-
-Explainiverse is a unified, extensible, and testable Python framework for explainable AI (XAI).
-It provides a consistent API and support for post-hoc explainers like LIME and SHAP, model adapters, and rigorous evaluation strategies.
-
----
-
-## Features
-
-- Standardized Explainer interface (`BaseExplainer`)
-- Support for classification, regression, and multi-class models
-- Integrated explainers:
-  - LIME (Local surrogate models)
-  - SHAP (KernelExplainer with per-class and global support)
-- Adapter layer for scikit-learn models
-- Explanation object with structured output and future extensibility for `.plot()`
-- Full unit test suite covering classification, regression, global/cohort SHAP, and adapter behavior
-
----
-
-## Installation
-
-This package will soon be available on PyPI.
-
-For development use:
-
-```bash
-git clone https://github.com/YOUR_USERNAME/explainiverse.git
-cd explainiverse
-poetry install
-```
-
----
-
-## Running Tests
-
-All tests can be run using:
-
-```bash
-poetry run python tests/test_all.py
-```
-
-For individual component testing:
-
-```bash
-poetry run python tests/test_shap_explainer.py
-poetry run python tests/test_lime_explainer.py
-```
-
----
-
-## Documentation
-
-Documentation is currently in development.
-Until then, test files (especially `test_shap_explainer.py`) demonstrate usage and structure.
-
----
-
-## License
-
-This project is licensed under the MIT License.
-

{explainiverse-0.1.0a1.dist-info → explainiverse-0.1.1a1.dist-info}/LICENSE
File without changes

{explainiverse-0.1.0a1.dist-info → explainiverse-0.1.1a1.dist-info}/WHEEL
File without changes