explainiverse 0.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. explainiverse-0.2.5/LICENSE +21 -0
  2. explainiverse-0.2.5/PKG-INFO +390 -0
  3. explainiverse-0.2.5/README.md +359 -0
  4. explainiverse-0.2.5/pyproject.toml +68 -0
  5. explainiverse-0.2.5/src/explainiverse/__init__.py +57 -0
  6. explainiverse-0.2.5/src/explainiverse/adapters/__init__.py +19 -0
  7. explainiverse-0.2.5/src/explainiverse/adapters/base_adapter.py +25 -0
  8. explainiverse-0.2.5/src/explainiverse/adapters/pytorch_adapter.py +396 -0
  9. explainiverse-0.2.5/src/explainiverse/adapters/sklearn_adapter.py +32 -0
  10. explainiverse-0.2.5/src/explainiverse/core/__init__.py +22 -0
  11. explainiverse-0.2.5/src/explainiverse/core/explainer.py +31 -0
  12. explainiverse-0.2.5/src/explainiverse/core/explanation.py +24 -0
  13. explainiverse-0.2.5/src/explainiverse/core/registry.py +634 -0
  14. explainiverse-0.2.5/src/explainiverse/engine/__init__.py +8 -0
  15. explainiverse-0.2.5/src/explainiverse/engine/suite.py +143 -0
  16. explainiverse-0.2.5/src/explainiverse/evaluation/__init__.py +8 -0
  17. explainiverse-0.2.5/src/explainiverse/evaluation/metrics.py +233 -0
  18. explainiverse-0.2.5/src/explainiverse/explainers/__init__.py +49 -0
  19. explainiverse-0.2.5/src/explainiverse/explainers/attribution/__init__.py +10 -0
  20. explainiverse-0.2.5/src/explainiverse/explainers/attribution/lime_wrapper.py +90 -0
  21. explainiverse-0.2.5/src/explainiverse/explainers/attribution/shap_wrapper.py +89 -0
  22. explainiverse-0.2.5/src/explainiverse/explainers/attribution/treeshap_wrapper.py +434 -0
  23. explainiverse-0.2.5/src/explainiverse/explainers/counterfactual/__init__.py +8 -0
  24. explainiverse-0.2.5/src/explainiverse/explainers/counterfactual/dice_wrapper.py +302 -0
  25. explainiverse-0.2.5/src/explainiverse/explainers/global_explainers/__init__.py +23 -0
  26. explainiverse-0.2.5/src/explainiverse/explainers/global_explainers/ale.py +191 -0
  27. explainiverse-0.2.5/src/explainiverse/explainers/global_explainers/partial_dependence.py +192 -0
  28. explainiverse-0.2.5/src/explainiverse/explainers/global_explainers/permutation_importance.py +123 -0
  29. explainiverse-0.2.5/src/explainiverse/explainers/global_explainers/sage.py +164 -0
  30. explainiverse-0.2.5/src/explainiverse/explainers/gradient/__init__.py +18 -0
  31. explainiverse-0.2.5/src/explainiverse/explainers/gradient/deeplift.py +745 -0
  32. explainiverse-0.2.5/src/explainiverse/explainers/gradient/gradcam.py +390 -0
  33. explainiverse-0.2.5/src/explainiverse/explainers/gradient/integrated_gradients.py +348 -0
  34. explainiverse-0.2.5/src/explainiverse/explainers/rule_based/__init__.py +8 -0
  35. explainiverse-0.2.5/src/explainiverse/explainers/rule_based/anchors_wrapper.py +350 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Muntaser Syed
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,390 @@
1
+ Metadata-Version: 2.1
2
+ Name: explainiverse
3
+ Version: 0.2.5
4
+ Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
5
+ Home-page: https://github.com/jemsbhai/explainiverse
6
+ License: MIT
7
+ Keywords: xai,explainability,interpretability,machine-learning,lime,shap,anchors
8
+ Author: Muntaser Syed
9
+ Author-email: jemsbhai@gmail.com
10
+ Requires-Python: >=3.10,<3.13
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Provides-Extra: torch
21
+ Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
22
+ Requires-Dist: numpy (>=1.24,<2.0)
23
+ Requires-Dist: scikit-learn (>=1.1,<1.6)
24
+ Requires-Dist: scipy (>=1.10,<2.0)
25
+ Requires-Dist: shap (>=0.48.0,<0.49.0)
26
+ Requires-Dist: torch (>=2.0) ; extra == "torch"
27
+ Requires-Dist: xgboost (>=1.7,<3.0)
28
+ Project-URL: Repository, https://github.com/jemsbhai/explainiverse
29
+ Description-Content-Type: text/markdown
30
+
31
+ # Explainiverse
32
+
33
+ **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
34
+ It provides a standardized interface for model-agnostic explainability with 11 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
35
+
36
+ ---
37
+
38
+ ## Features
39
+
40
+ ### 🎯 Comprehensive XAI Coverage
41
+
42
+ **Local Explainers** (instance-level explanations):
43
+ - **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
44
+ - **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
45
+ - **TreeSHAP** - Exact SHAP values for tree models, 10x+ faster ([Lundberg et al., 2018](https://arxiv.org/abs/1802.03888))
46
+ - **Integrated Gradients** - Axiomatic attributions for neural networks ([Sundararajan et al., 2017](https://arxiv.org/abs/1703.01365))
47
+ - **GradCAM/GradCAM++** - Visual explanations for CNNs ([Selvaraju et al., 2017](https://arxiv.org/abs/1610.02391); GradCAM++: [Chattopadhay et al., 2018](https://arxiv.org/abs/1710.11063))
48
+ - **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
49
+ - **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))
50
+
51
+ **Global Explainers** (model-level explanations):
52
+ - **Permutation Importance** - Feature importance via performance degradation ([Breiman, 2001](https://link.springer.com/article/10.1023/A:1010933404324))
53
+ - **Partial Dependence (PDP)** - Marginal feature effects ([Friedman, 2001](https://projecteuclid.org/euclid.aos/1013203451))
54
+ - **ALE** - Accumulated Local Effects, unbiased for correlated features ([Apley & Zhu, 2020](https://academic.oup.com/jrsssb/article/82/4/1059/7056085))
55
+ - **SAGE** - Shapley Additive Global importancE ([Covert et al., 2020](https://arxiv.org/abs/2004.00668))
56
+
57
+ ### 🔌 Extensible Plugin Registry
58
+ - Register custom explainers with rich metadata
59
+ - Filter by scope (local/global), model type, data type
60
+ - Automatic recommendations based on use case
61
+
62
+ ### 📊 Evaluation Metrics
63
+ - **AOPC** (Area Over the Perturbation Curve)
64
+ - **ROAR** (Remove And Retrain)
65
+ - Multiple baseline options and curve generation
66
+
67
+ ### 🧪 Standardized Interface
68
+ - Consistent `BaseExplainer` API
69
+ - Unified `Explanation` output format
70
+ - Model adapters for sklearn and PyTorch
71
+
72
+ ---
73
+
74
+ ## Installation
75
+
76
+ From PyPI:
77
+
78
+ ```bash
79
+ pip install explainiverse
80
+ ```
81
+
82
+ With PyTorch support (for neural network explanations):
83
+
84
+ ```bash
85
+ pip install explainiverse[torch]
86
+ ```
87
+
88
+ For development:
89
+
90
+ ```bash
91
+ git clone https://github.com/jemsbhai/explainiverse.git
92
+ cd explainiverse
93
+ poetry install
94
+ ```
95
+
96
+ ---
97
+
98
+ ## Quick Start
99
+
100
+ ### Using the Registry (Recommended)
101
+
102
+ ```python
103
+ from explainiverse import default_registry, SklearnAdapter
104
+ from sklearn.ensemble import RandomForestClassifier
105
+ from sklearn.datasets import load_iris
106
+
107
+ # Train a model
108
+ iris = load_iris()
109
+ model = RandomForestClassifier().fit(iris.data, iris.target)
110
+ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
111
+
112
+ # List available explainers
113
+ print(default_registry.list_explainers())
114
+ # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'gradcam', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
115
+
116
+ # Create and use an explainer
117
+ explainer = default_registry.create(
118
+ "lime",
119
+ model=adapter,
120
+ training_data=iris.data,
121
+ feature_names=iris.feature_names,
122
+ class_names=iris.target_names.tolist()
123
+ )
124
+ explanation = explainer.explain(iris.data[0])
125
+ print(explanation.explanation_data["feature_attributions"])
126
+ ```
127
+
128
+ ### Filter Explainers by Criteria
129
+
130
+ ```python
131
+ # Find local explainers for tabular data
132
+ local_tabular = default_registry.filter(scope="local", data_type="tabular")
133
+ print(local_tabular) # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'anchors', 'counterfactual']
134
+
135
+ # Find explainers for images/CNNs
136
+ image_explainers = default_registry.filter(data_type="image")
137
+ print(image_explainers) # ['lime', 'integrated_gradients', 'gradcam']
138
+
139
+ # Get recommendations
140
+ recommendations = default_registry.recommend(
141
+ model_type="any",
142
+ data_type="tabular",
143
+ scope_preference="local"
144
+ )
145
+ ```
146
+
147
+ ### TreeSHAP for Tree Models (10x+ Faster)
148
+
149
+ ```python
150
+ from explainiverse.explainers import TreeShapExplainer
151
+ from sklearn.ensemble import RandomForestClassifier
152
+
153
+ # Train a tree-based model
154
+ model = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
155
+
156
+ # TreeSHAP works directly with the model (no adapter needed)
157
+ explainer = TreeShapExplainer(
158
+ model=model,
159
+ feature_names=feature_names,
160
+ class_names=class_names
161
+ )
162
+
163
+ # Single instance explanation
164
+ explanation = explainer.explain(X_test[0])
165
+ print(explanation.explanation_data["feature_attributions"])
166
+
167
+ # Batch explanations (efficient)
168
+ explanations = explainer.explain_batch(X_test[:10])
169
+
170
+ # Feature interactions
171
+ interactions = explainer.explain_interactions(X_test[0])
172
+ print(interactions.explanation_data["interaction_matrix"])
173
+ ```
174
+
175
+ ### PyTorch Adapter for Neural Networks
176
+
177
+ ```python
178
+ from explainiverse import PyTorchAdapter
179
+ import torch.nn as nn
180
+
181
+ # Define a PyTorch model
182
+ model = nn.Sequential(
183
+ nn.Linear(10, 64),
184
+ nn.ReLU(),
185
+ nn.Linear(64, 3)
186
+ )
187
+
188
+ # Wrap with adapter
189
+ adapter = PyTorchAdapter(
190
+ model,
191
+ task="classification",
192
+ class_names=["cat", "dog", "bird"]
193
+ )
194
+
195
+ # Use with any explainer
196
+ predictions = adapter.predict(X) # Returns numpy array
197
+
198
+ # Get gradients for attribution methods
199
+ predictions, gradients = adapter.predict_with_gradients(X)
200
+
201
+ # Access intermediate layers
202
+ activations = adapter.get_layer_output(X, layer_name="0")
203
+ ```
204
+
205
+ ### Integrated Gradients for Neural Networks
206
+
207
+ ```python
208
+ from explainiverse.explainers import IntegratedGradientsExplainer
209
+ from explainiverse import PyTorchAdapter
210
+
211
+ # Wrap your PyTorch model
212
+ adapter = PyTorchAdapter(model, task="classification", class_names=class_names)
213
+
214
+ # Create IG explainer
215
+ explainer = IntegratedGradientsExplainer(
216
+ model=adapter,
217
+ feature_names=feature_names,
218
+ class_names=class_names,
219
+ n_steps=50 # More steps = more accurate
220
+ )
221
+
222
+ # Explain a prediction
223
+ explanation = explainer.explain(X_test[0])
224
+ print(explanation.explanation_data["feature_attributions"])
225
+
226
+ # Check convergence (sum of attributions ≈ F(x) - F(baseline))
227
+ explanation = explainer.explain(X_test[0], return_convergence_delta=True)
228
+ print(f"Convergence delta: {explanation.explanation_data['convergence_delta']}")
229
+ ```
230
+
231
+ ### GradCAM for CNN Visual Explanations
232
+
233
+ ```python
234
+ from explainiverse.explainers import GradCAMExplainer
235
+ from explainiverse import PyTorchAdapter
236
+
237
+ # Wrap your CNN model
238
+ adapter = PyTorchAdapter(cnn_model, task="classification", class_names=class_names)
239
+
240
+ # Find the last convolutional layer
241
+ layers = adapter.list_layers()
242
+ target_layer = "layer4" # Adjust based on your model architecture
243
+
244
+ # Create GradCAM explainer
245
+ explainer = GradCAMExplainer(
246
+ model=adapter,
247
+ target_layer=target_layer,
248
+ class_names=class_names,
249
+ method="gradcam" # or "gradcam++" for improved version
250
+ )
251
+
252
+ # Explain an image prediction
253
+ explanation = explainer.explain(image) # image shape: (C, H, W) or (N, C, H, W)
254
+ heatmap = explanation.explanation_data["heatmap"]
255
+
256
+ # Create overlay visualization
257
+ overlay = explainer.get_overlay(original_image, heatmap, alpha=0.5)
258
+ ```
259
+
260
+ ### Using Specific Explainers
261
+
262
+ ```python
263
+ # Anchors - Rule-based explanations
264
+ from explainiverse.explainers import AnchorsExplainer
265
+
266
+ anchors = AnchorsExplainer(
267
+ model=adapter,
268
+ training_data=X_train,
269
+ feature_names=feature_names,
270
+ class_names=class_names
271
+ )
272
+ explanation = anchors.explain(instance)
273
+ print(explanation.explanation_data["rules"])
274
+ # ['petal length (cm) > 2.45', 'petal width (cm) <= 1.75']
275
+
276
+ # Counterfactual - What-if explanations
277
+ from explainiverse.explainers import CounterfactualExplainer
278
+
279
+ cf = CounterfactualExplainer(
280
+ model=adapter,
281
+ training_data=X_train,
282
+ feature_names=feature_names
283
+ )
284
+ explanation = cf.explain(instance, num_counterfactuals=3)
285
+ print(explanation.explanation_data["changes"])
286
+
287
+ # SAGE - Global Shapley importance
288
+ from explainiverse.explainers import SAGEExplainer
289
+
290
+ sage = SAGEExplainer(
291
+ model=adapter,
292
+ X=X_train,
293
+ y=y_train,
294
+ feature_names=feature_names
295
+ )
296
+ explanation = sage.explain()
297
+ print(explanation.explanation_data["feature_attributions"])
298
+ ```
299
+
300
+ ### Explanation Suite (Multi-Explainer Comparison)
301
+
302
+ ```python
303
+ from explainiverse import ExplanationSuite
304
+
305
+ suite = ExplanationSuite(
306
+ model=adapter,
307
+ explainer_configs=[
308
+ ("lime", {"training_data": X_train, "feature_names": feature_names, "class_names": class_names}),
309
+ ("shap", {"background_data": X_train[:50], "feature_names": feature_names, "class_names": class_names}),
310
+ ]
311
+ )
312
+
313
+ results = suite.run(instance)
314
+ suite.compare()
315
+ ```
316
+
317
+ ---
318
+
319
+ ## Registering Custom Explainers
320
+
321
+ ```python
322
+ from explainiverse import ExplainerRegistry, ExplainerMeta, BaseExplainer
323
+
324
+ @default_registry.register_decorator(
325
+ name="my_explainer",
326
+ meta=ExplainerMeta(
327
+ scope="local",
328
+ model_types=["any"],
329
+ data_types=["tabular"],
330
+ description="My custom explainer",
331
+ paper_reference="Author et al., 2024"
332
+ )
333
+ )
334
+ class MyExplainer(BaseExplainer):
335
+ def explain(self, instance, **kwargs):
336
+ # Your implementation
337
+ return Explanation(...)
338
+ ```
339
+
340
+ ---
341
+
342
+ ## Running Tests
343
+
344
+ ```bash
345
+ # Run all tests
346
+ poetry run pytest
347
+
348
+ # Run with coverage
349
+ poetry run pytest --cov=explainiverse
350
+
351
+ # Run specific test file
352
+ poetry run pytest tests/test_new_explainers.py -v
353
+ ```
354
+
355
+ ---
356
+
357
+ ## Roadmap
358
+
359
+ - [x] LIME, SHAP (KernelSHAP)
360
+ - [x] TreeSHAP (optimized for tree models) ✅
361
+ - [x] Anchors, Counterfactuals
362
+ - [x] Permutation Importance, PDP, ALE, SAGE
363
+ - [x] Explainer Registry with filtering
364
+ - [x] PyTorch Adapter ✅
365
+ - [x] Integrated Gradients ✅
366
+ - [x] GradCAM/GradCAM++ for CNNs ✅ NEW
367
+ - [ ] TensorFlow adapter
368
+ - [ ] Interactive visualization dashboard
369
+
370
+ ---
371
+
372
+ ## Citation
373
+
374
+ If you use Explainiverse in your research, please cite:
375
+
376
+ ```bibtex
377
+ @software{explainiverse2024,
378
+ title = {Explainiverse: A Unified Framework for Explainable AI},
379
+ author = {Syed, Muntaser},
380
+ year = {2024},
381
+ url = {https://github.com/jemsbhai/explainiverse}
382
+ }
383
+ ```
384
+
385
+ ---
386
+
387
+ ## License
388
+
389
+ MIT License - see [LICENSE](LICENSE) for details.
390
+