explainiverse 0.2.1__tar.gz → 0.2.3__tar.gz
This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
- {explainiverse-0.2.1 → explainiverse-0.2.3}/PKG-INFO +107 -11
- {explainiverse-0.2.1 → explainiverse-0.2.3}/README.md +104 -10
- {explainiverse-0.2.1 → explainiverse-0.2.3}/pyproject.toml +5 -1
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/__init__.py +15 -3
- explainiverse-0.2.3/src/explainiverse/adapters/__init__.py +19 -0
- explainiverse-0.2.3/src/explainiverse/adapters/pytorch_adapter.py +396 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/core/registry.py +18 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/__init__.py +3 -0
- explainiverse-0.2.3/src/explainiverse/explainers/gradient/__init__.py +11 -0
- explainiverse-0.2.3/src/explainiverse/explainers/gradient/integrated_gradients.py +348 -0
- explainiverse-0.2.1/src/explainiverse/adapters/__init__.py +0 -9
- {explainiverse-0.2.1 → explainiverse-0.2.3}/LICENSE +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/adapters/base_adapter.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/adapters/sklearn_adapter.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/core/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/core/explainer.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/core/explanation.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/engine/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/engine/suite.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/evaluation/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/evaluation/metrics.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/attribution/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/attribution/lime_wrapper.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/attribution/shap_wrapper.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/attribution/treeshap_wrapper.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/counterfactual/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/counterfactual/dice_wrapper.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/global_explainers/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/global_explainers/ale.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/global_explainers/partial_dependence.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/global_explainers/permutation_importance.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/global_explainers/sage.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/rule_based/__init__.py +0 -0
- {explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/explainers/rule_based/anchors_wrapper.py +0 -0
{explainiverse-0.2.1 → explainiverse-0.2.3}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: explainiverse
-Version: 0.2.1
+Version: 0.2.3
 Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
 Home-page: https://github.com/jemsbhai/explainiverse
 License: MIT
@@ -17,11 +17,13 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Provides-Extra: torch
 Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
 Requires-Dist: numpy (>=1.24,<2.0)
 Requires-Dist: scikit-learn (>=1.1,<1.6)
 Requires-Dist: scipy (>=1.10,<2.0)
 Requires-Dist: shap (>=0.48.0,<0.49.0)
+Requires-Dist: torch (>=2.0) ; extra == "torch"
 Requires-Dist: xgboost (>=1.7,<3.0)
 Project-URL: Repository, https://github.com/jemsbhai/explainiverse
 Description-Content-Type: text/markdown
@@ -29,7 +31,7 @@ Description-Content-Type: text/markdown
 # Explainiverse
 
 **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
-It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
+It provides a standardized interface for model-agnostic explainability with 10 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
 
 ---
 
@@ -40,6 +42,8 @@ It provides a standardized interface for model-agnostic explainability with 8 st
 **Local Explainers** (instance-level explanations):
 - **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
 - **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
+- **TreeSHAP** - Exact SHAP values for tree models, 10x+ faster ([Lundberg et al., 2018](https://arxiv.org/abs/1802.03888))
+- **Integrated Gradients** - Axiomatic attributions for neural networks ([Sundararajan et al., 2017](https://arxiv.org/abs/1703.01365))
 - **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
 - **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))
 
@@ -62,7 +66,7 @@ It provides a standardized interface for model-agnostic explainability with 8 st
 ### 🧪 Standardized Interface
 - Consistent `BaseExplainer` API
 - Unified `Explanation` output format
-- Model adapters for sklearn and
+- Model adapters for sklearn and PyTorch
 
 ---
 
@@ -74,6 +78,12 @@ From PyPI:
 pip install explainiverse
 ```
 
+With PyTorch support (for neural network explanations):
+
+```bash
+pip install explainiverse[torch]
+```
+
 For development:
 
 ```bash
@@ -100,7 +110,7 @@ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
 
 # List available explainers
 print(default_registry.list_explainers())
-# ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
+# ['lime', 'shap', 'treeshap', 'integrated_gradients', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
 
 # Create and use an explainer
 explainer = default_registry.create(
@@ -119,11 +129,11 @@ print(explanation.explanation_data["feature_attributions"])
 ```python
 # Find local explainers for tabular data
 local_tabular = default_registry.filter(scope="local", data_type="tabular")
-print(local_tabular)  # ['lime', 'shap', 'anchors', 'counterfactual']
+print(local_tabular)  # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'anchors', 'counterfactual']
 
-# Find
-
-print(
+# Find explainers optimized for tree models
+tree_explainers = default_registry.filter(model_type="tree")
+print(tree_explainers)  # ['treeshap']
 
 # Get recommendations
 recommendations = default_registry.recommend(
@@ -133,6 +143,90 @@ recommendations = default_registry.recommend(
 )
 ```
 
+### TreeSHAP for Tree Models (10x+ Faster)
+
+```python
+from explainiverse.explainers import TreeShapExplainer
+from sklearn.ensemble import RandomForestClassifier
+
+# Train a tree-based model
+model = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
+
+# TreeSHAP works directly with the model (no adapter needed)
+explainer = TreeShapExplainer(
+    model=model,
+    feature_names=feature_names,
+    class_names=class_names
+)
+
+# Single instance explanation
+explanation = explainer.explain(X_test[0])
+print(explanation.explanation_data["feature_attributions"])
+
+# Batch explanations (efficient)
+explanations = explainer.explain_batch(X_test[:10])
+
+# Feature interactions
+interactions = explainer.explain_interactions(X_test[0])
+print(interactions.explanation_data["interaction_matrix"])
+```
+
+### PyTorch Adapter for Neural Networks
+
+```python
+from explainiverse import PyTorchAdapter
+import torch.nn as nn
+
+# Define a PyTorch model
+model = nn.Sequential(
+    nn.Linear(10, 64),
+    nn.ReLU(),
+    nn.Linear(64, 3)
+)
+
+# Wrap with adapter
+adapter = PyTorchAdapter(
+    model,
+    task="classification",
+    class_names=["cat", "dog", "bird"]
+)
+
+# Use with any explainer
+predictions = adapter.predict(X)  # Returns numpy array
+
+# Get gradients for attribution methods
+predictions, gradients = adapter.predict_with_gradients(X)
+
+# Access intermediate layers
+activations = adapter.get_layer_output(X, layer_name="0")
+```
+
+### Integrated Gradients for Neural Networks
+
+```python
+from explainiverse.explainers import IntegratedGradientsExplainer
+from explainiverse import PyTorchAdapter
+
+# Wrap your PyTorch model
+adapter = PyTorchAdapter(model, task="classification", class_names=class_names)
+
+# Create IG explainer
+explainer = IntegratedGradientsExplainer(
+    model=adapter,
+    feature_names=feature_names,
+    class_names=class_names,
+    n_steps=50  # More steps = more accurate
+)
+
+# Explain a prediction
+explanation = explainer.explain(X_test[0])
+print(explanation.explanation_data["feature_attributions"])
+
+# Check convergence (sum of attributions ≈ F(x) - F(baseline))
+explanation = explainer.explain(X_test[0], return_convergence_delta=True)
+print(f"Convergence delta: {explanation.explanation_data['convergence_delta']}")
+```
+
 ### Using Specific Explainers
 
 ```python
@@ -233,12 +327,14 @@ poetry run pytest tests/test_new_explainers.py -v
 ## Roadmap
 
 - [x] LIME, SHAP (KernelSHAP)
+- [x] TreeSHAP (optimized for tree models) ✅
 - [x] Anchors, Counterfactuals
 - [x] Permutation Importance, PDP, ALE, SAGE
 - [x] Explainer Registry with filtering
-- [
-- [
-- [ ]
+- [x] PyTorch Adapter ✅
+- [x] Integrated Gradients ✅ NEW
+- [ ] GradCAM for CNNs
+- [ ] TensorFlow adapter
 - [ ] Interactive visualization dashboard
 
 ---
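The `Provides-Extra: torch` / `Requires-Dist: torch (>=2.0) ; extra == "torch"` pair above is standard PEP 508 metadata: torch is only pulled in when the extra is requested. You can confirm what an installed copy advertises with the standard library alone (a sketch; not part of the package):

```python
# Print the requirement strings published in the PKG-INFO shown above.
# Requires explainiverse to be installed in the current environment.
from importlib.metadata import requires

for req in requires("explainiverse") or []:
    print(req)  # the torch line carries the marker: extra == "torch"
```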
{explainiverse-0.2.1 → explainiverse-0.2.3}/README.md

@@ -1,7 +1,7 @@
 # Explainiverse
 
 **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
-It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
+It provides a standardized interface for model-agnostic explainability with 10 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
 
 ---
 
@@ -12,6 +12,8 @@ It provides a standardized interface for model-agnostic explainability with 8 st
 **Local Explainers** (instance-level explanations):
 - **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
 - **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
+- **TreeSHAP** - Exact SHAP values for tree models, 10x+ faster ([Lundberg et al., 2018](https://arxiv.org/abs/1802.03888))
+- **Integrated Gradients** - Axiomatic attributions for neural networks ([Sundararajan et al., 2017](https://arxiv.org/abs/1703.01365))
 - **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
 - **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))
 
@@ -34,7 +36,7 @@ It provides a standardized interface for model-agnostic explainability with 8 st
 ### 🧪 Standardized Interface
 - Consistent `BaseExplainer` API
 - Unified `Explanation` output format
-- Model adapters for sklearn and
+- Model adapters for sklearn and PyTorch
 
 ---
 
@@ -46,6 +48,12 @@ From PyPI:
 pip install explainiverse
 ```
 
+With PyTorch support (for neural network explanations):
+
+```bash
+pip install explainiverse[torch]
+```
+
 For development:
 
 ```bash
@@ -72,7 +80,7 @@ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
 
 # List available explainers
 print(default_registry.list_explainers())
-# ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
+# ['lime', 'shap', 'treeshap', 'integrated_gradients', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
 
 # Create and use an explainer
 explainer = default_registry.create(
@@ -91,11 +99,11 @@ print(explanation.explanation_data["feature_attributions"])
 ```python
 # Find local explainers for tabular data
 local_tabular = default_registry.filter(scope="local", data_type="tabular")
-print(local_tabular)  # ['lime', 'shap', 'anchors', 'counterfactual']
+print(local_tabular)  # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'anchors', 'counterfactual']
 
-# Find
-
-print(
+# Find explainers optimized for tree models
+tree_explainers = default_registry.filter(model_type="tree")
+print(tree_explainers)  # ['treeshap']
 
 # Get recommendations
 recommendations = default_registry.recommend(
@@ -105,6 +113,90 @@ recommendations = default_registry.recommend(
 )
 ```
 
+### TreeSHAP for Tree Models (10x+ Faster)
+
+```python
+from explainiverse.explainers import TreeShapExplainer
+from sklearn.ensemble import RandomForestClassifier
+
+# Train a tree-based model
+model = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
+
+# TreeSHAP works directly with the model (no adapter needed)
+explainer = TreeShapExplainer(
+    model=model,
+    feature_names=feature_names,
+    class_names=class_names
+)
+
+# Single instance explanation
+explanation = explainer.explain(X_test[0])
+print(explanation.explanation_data["feature_attributions"])
+
+# Batch explanations (efficient)
+explanations = explainer.explain_batch(X_test[:10])
+
+# Feature interactions
+interactions = explainer.explain_interactions(X_test[0])
+print(interactions.explanation_data["interaction_matrix"])
+```
+
+### PyTorch Adapter for Neural Networks
+
+```python
+from explainiverse import PyTorchAdapter
+import torch.nn as nn
+
+# Define a PyTorch model
+model = nn.Sequential(
+    nn.Linear(10, 64),
+    nn.ReLU(),
+    nn.Linear(64, 3)
+)
+
+# Wrap with adapter
+adapter = PyTorchAdapter(
+    model,
+    task="classification",
+    class_names=["cat", "dog", "bird"]
+)
+
+# Use with any explainer
+predictions = adapter.predict(X)  # Returns numpy array
+
+# Get gradients for attribution methods
+predictions, gradients = adapter.predict_with_gradients(X)
+
+# Access intermediate layers
+activations = adapter.get_layer_output(X, layer_name="0")
+```
+
+### Integrated Gradients for Neural Networks
+
+```python
+from explainiverse.explainers import IntegratedGradientsExplainer
+from explainiverse import PyTorchAdapter
+
+# Wrap your PyTorch model
+adapter = PyTorchAdapter(model, task="classification", class_names=class_names)
+
+# Create IG explainer
+explainer = IntegratedGradientsExplainer(
+    model=adapter,
+    feature_names=feature_names,
+    class_names=class_names,
+    n_steps=50  # More steps = more accurate
+)
+
+# Explain a prediction
+explanation = explainer.explain(X_test[0])
+print(explanation.explanation_data["feature_attributions"])
+
+# Check convergence (sum of attributions ≈ F(x) - F(baseline))
+explanation = explainer.explain(X_test[0], return_convergence_delta=True)
+print(f"Convergence delta: {explanation.explanation_data['convergence_delta']}")
+```
+
 ### Using Specific Explainers
 
 ```python
@@ -205,12 +297,14 @@ poetry run pytest tests/test_new_explainers.py -v
 ## Roadmap
 
 - [x] LIME, SHAP (KernelSHAP)
+- [x] TreeSHAP (optimized for tree models) ✅
 - [x] Anchors, Counterfactuals
 - [x] Permutation Importance, PDP, ALE, SAGE
 - [x] Explainer Registry with filtering
-- [
-- [
-- [ ]
+- [x] PyTorch Adapter ✅
+- [x] Integrated Gradients ✅ NEW
+- [ ] GradCAM for CNNs
+- [ ] TensorFlow adapter
 - [ ] Interactive visualization dashboard
 
 ---
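The convergence check in the Integrated Gradients example above rests on the completeness axiom: the attributions approximate a path integral whose terms sum to F(x) − F(baseline), and the leftover is the reported delta. A self-contained NumPy sketch of that property (illustrative only, independent of explainiverse's implementation):

```python
import numpy as np

def f(x):
    # Toy differentiable model: f(x) = sum_i x_i^2
    return np.sum(x ** 2)

def grad_f(x):
    # Analytic gradient of f
    return 2 * x

def integrated_gradients(x, baseline, n_steps=50):
    # Midpoint Riemann sum of IG_i = (x_i - b_i) * integral_0^1 dF/dx_i(b + a(x - b)) da
    alphas = (np.arange(n_steps) + 0.5) / n_steps
    grads = np.stack([grad_f(baseline + a * (x - baseline)) for a in alphas])
    return (x - baseline) * grads.mean(axis=0)

x = np.array([1.0, -2.0, 3.0])
baseline = np.zeros_like(x)
attr = integrated_gradients(x, baseline)

# Completeness: attributions should sum to f(x) - f(baseline);
# the residual is exactly the "convergence delta" reported above.
delta = attr.sum() - (f(x) - f(baseline))
print(attr, delta)
```

More steps shrink the delta for general models; for this quadratic the midpoint rule is already exact.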
{explainiverse-0.2.1 → explainiverse-0.2.3}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "explainiverse"
-version = "0.2.1"
+version = "0.2.3"
 description = "Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more"
 authors = ["Muntaser Syed <jemsbhai@gmail.com>"]
 license = "MIT"
@@ -29,6 +29,10 @@ scikit-learn = ">=1.1,<1.6"
 shap = "^0.48.0"
 scipy = ">=1.10,<2.0"
 xgboost = ">=1.7,<3.0"
+torch = { version = ">=2.0", optional = true }
+
+[tool.poetry.extras]
+torch = ["torch"]
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^8.0"
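Because torch is declared `optional = true` and only wired up through the `[tool.poetry.extras]` table, a plain `pip install explainiverse` leaves it out. A quick stdlib-only check of whether the extra landed in an environment (a sketch, not part of the package):

```python
# Detect the optional torch dependency without importing it.
import importlib.util

if importlib.util.find_spec("torch") is None:
    print("torch missing - install with: pip install explainiverse[torch]")
else:
    print("torch available - PyTorchAdapter can be used")
```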
{explainiverse-0.2.1 → explainiverse-0.2.3}/src/explainiverse/__init__.py

@@ -2,8 +2,9 @@
 """
 Explainiverse - A unified, extensible explainability framework.
 
-Supports multiple XAI methods including LIME, SHAP, Anchors,
-Permutation Importance, PDP, ALE, and SAGE through a
+Supports multiple XAI methods including LIME, SHAP, TreeSHAP, Anchors,
+Counterfactuals, Permutation Importance, PDP, ALE, and SAGE through a
+consistent interface.
 
 Quick Start:
     from explainiverse import default_registry
@@ -14,6 +15,10 @@ Quick Start:
     # Create an explainer
     explainer = default_registry.create("lime", model=adapter, training_data=X, ...)
     explanation = explainer.explain(instance)
+
+For PyTorch models:
+    from explainiverse import PyTorchAdapter  # Requires torch
+    adapter = PyTorchAdapter(model, task="classification")
 """
 
 from explainiverse.core.explainer import BaseExplainer
@@ -25,9 +30,10 @@ from explainiverse.core.registry import (
     get_default_registry,
 )
 from explainiverse.adapters.sklearn_adapter import SklearnAdapter
+from explainiverse.adapters import TORCH_AVAILABLE
 from explainiverse.engine.suite import ExplanationSuite
 
-__version__ = "0.2.1"
+__version__ = "0.2.3"
 
 __all__ = [
     # Core
@@ -40,6 +46,12 @@ __all__ = [
     "get_default_registry",
     # Adapters
    "SklearnAdapter",
+    "TORCH_AVAILABLE",
     # Engine
     "ExplanationSuite",
 ]
+
+# Conditionally export PyTorchAdapter if torch is available
+if TORCH_AVAILABLE:
+    from explainiverse.adapters import PyTorchAdapter
+    __all__.append("PyTorchAdapter")
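With `TORCH_AVAILABLE` now exported at the package top level, downstream code can feature-detect instead of wrapping its own imports in try/except. A minimal sketch, assuming only the API visible in this diff (the adapter signature comes from the README examples):

```python
from explainiverse import TORCH_AVAILABLE

if TORCH_AVAILABLE:
    import torch.nn as nn
    from explainiverse import PyTorchAdapter

    # Tiny classifier, wrapped the same way as in the README
    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    adapter = PyTorchAdapter(net, task="classification", class_names=["neg", "pos"])
else:
    raise SystemExit("PyTorch support not installed: pip install explainiverse[torch]")
```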
explainiverse-0.2.3/src/explainiverse/adapters/__init__.py (new file)

@@ -0,0 +1,19 @@
+# src/explainiverse/adapters/__init__.py
+"""
+Model adapters - wrappers that provide a consistent interface for different ML frameworks.
+
+Available adapters:
+- SklearnAdapter: For scikit-learn models (always available)
+- PyTorchAdapter: For PyTorch nn.Module models (requires torch)
+"""
+
+from explainiverse.adapters.base_adapter import BaseModelAdapter
+from explainiverse.adapters.sklearn_adapter import SklearnAdapter
+
+# Conditionally import PyTorchAdapter if torch is available
+try:
+    from explainiverse.adapters.pytorch_adapter import PyTorchAdapter, TORCH_AVAILABLE
+    __all__ = ["BaseModelAdapter", "SklearnAdapter", "PyTorchAdapter", "TORCH_AVAILABLE"]
+except ImportError:
+    TORCH_AVAILABLE = False
+    __all__ = ["BaseModelAdapter", "SklearnAdapter", "TORCH_AVAILABLE"]