explainiverse 0.4.0__tar.gz → 0.6.0__tar.gz

Files changed (44)
  1. explainiverse-0.6.0/PKG-INFO +652 -0
  2. explainiverse-0.6.0/README.md +620 -0
  3. {explainiverse-0.4.0 → explainiverse-0.6.0}/pyproject.toml +1 -1
  4. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/__init__.py +1 -1
  5. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/core/registry.py +36 -0
  6. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/gradient/__init__.py +4 -0
  7. explainiverse-0.6.0/src/explainiverse/explainers/gradient/saliency.py +293 -0
  8. explainiverse-0.6.0/src/explainiverse/explainers/gradient/smoothgrad.py +424 -0
  9. explainiverse-0.4.0/PKG-INFO +0 -391
  10. explainiverse-0.4.0/README.md +0 -359
  11. {explainiverse-0.4.0 → explainiverse-0.6.0}/LICENSE +0 -0
  12. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/adapters/__init__.py +0 -0
  13. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/adapters/base_adapter.py +0 -0
  14. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/adapters/pytorch_adapter.py +0 -0
  15. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/adapters/sklearn_adapter.py +0 -0
  16. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/core/__init__.py +0 -0
  17. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/core/explainer.py +0 -0
  18. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/core/explanation.py +0 -0
  19. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/engine/__init__.py +0 -0
  20. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/engine/suite.py +0 -0
  21. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/evaluation/__init__.py +0 -0
  22. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/evaluation/_utils.py +0 -0
  23. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/evaluation/faithfulness.py +0 -0
  24. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/evaluation/metrics.py +0 -0
  25. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/evaluation/stability.py +0 -0
  26. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/__init__.py +0 -0
  27. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/attribution/__init__.py +0 -0
  28. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/attribution/lime_wrapper.py +0 -0
  29. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/attribution/shap_wrapper.py +0 -0
  30. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/attribution/treeshap_wrapper.py +0 -0
  31. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/counterfactual/__init__.py +0 -0
  32. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/counterfactual/dice_wrapper.py +0 -0
  33. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/example_based/__init__.py +0 -0
  34. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/example_based/protodash.py +0 -0
  35. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/global_explainers/__init__.py +0 -0
  36. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/global_explainers/ale.py +0 -0
  37. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/global_explainers/partial_dependence.py +0 -0
  38. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/global_explainers/permutation_importance.py +0 -0
  39. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/global_explainers/sage.py +0 -0
  40. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/gradient/deeplift.py +0 -0
  41. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/gradient/gradcam.py +0 -0
  42. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/gradient/integrated_gradients.py +0 -0
  43. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/rule_based/__init__.py +0 -0
  44. {explainiverse-0.4.0 → explainiverse-0.6.0}/src/explainiverse/explainers/rule_based/anchors_wrapper.py +0 -0
@@ -0,0 +1,652 @@
+ Metadata-Version: 2.1
+ Name: explainiverse
+ Version: 0.6.0
+ Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
+ Home-page: https://github.com/jemsbhai/explainiverse
+ License: MIT
+ Keywords: xai,explainability,interpretability,machine-learning,lime,shap,anchors
+ Author: Muntaser Syed
+ Author-email: jemsbhai@gmail.com
+ Requires-Python: >=3.10,<3.13
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Provides-Extra: torch
+ Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
+ Requires-Dist: numpy (>=1.24,<2.0)
+ Requires-Dist: pandas (>=1.5,<3.0)
+ Requires-Dist: scikit-learn (>=1.1,<1.6)
+ Requires-Dist: scipy (>=1.10,<2.0)
+ Requires-Dist: shap (>=0.48.0,<0.49.0)
+ Requires-Dist: torch (>=2.0) ; extra == "torch"
+ Requires-Dist: xgboost (>=1.7,<3.0)
+ Project-URL: Repository, https://github.com/jemsbhai/explainiverse
+ Description-Content-Type: text/markdown
+
+ # Explainiverse
+
+ [![PyPI version](https://badge.fury.io/py/explainiverse.svg)](https://badge.fury.io/py/explainiverse)
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+ **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI). It provides a standardized interface for **16 state-of-the-art explanation methods** across local, global, gradient-based, and example-based paradigms, along with **comprehensive evaluation metrics** for assessing explanation quality.
+
+ ---
+
+ ## Key Features
+
+ | Feature | Description |
+ |---------|-------------|
+ | **16 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
+ | **8 Evaluation Metrics** | Faithfulness (PGI, PGU, Comprehensiveness, Sufficiency, Correlation) and Stability (RIS, ROS, Lipschitz) |
+ | **Unified API** | Consistent `BaseExplainer` interface with standardized `Explanation` output |
+ | **Plugin Registry** | Filter explainers by scope, model type, data type; automatic recommendations |
+ | **Framework Support** | Adapters for scikit-learn and PyTorch (with gradient computation) |
+
+ ---
+
+ ## Explainer Coverage
+
+ ### Local Explainers (Instance-Level)
+
+ | Method | Type | Reference |
+ |--------|------|-----------|
+ | **LIME** | Perturbation | [Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938) |
+ | **KernelSHAP** | Perturbation | [Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874) |
+ | **TreeSHAP** | Exact (Trees) | [Lundberg et al., 2018](https://arxiv.org/abs/1802.03888) |
+ | **Integrated Gradients** | Gradient | [Sundararajan et al., 2017](https://arxiv.org/abs/1703.01365) |
+ | **DeepLIFT** | Gradient | [Shrikumar et al., 2017](https://arxiv.org/abs/1704.02685) |
+ | **DeepSHAP** | Gradient + Shapley | [Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874) |
+ | **SmoothGrad** | Gradient | [Smilkov et al., 2017](https://arxiv.org/abs/1706.03825) |
+ | **Saliency Maps** | Gradient | [Simonyan et al., 2014](https://arxiv.org/abs/1312.6034) |
+ | **GradCAM / GradCAM++** | Gradient (CNN) | [Selvaraju et al., 2017](https://arxiv.org/abs/1610.02391); [Chattopadhay et al., 2018](https://arxiv.org/abs/1710.11063) |
+ | **Anchors** | Rule-Based | [Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491) |
+ | **Counterfactual** | Contrastive | [Mothilal et al., 2020](https://arxiv.org/abs/1905.07697) |
+ | **ProtoDash** | Example-Based | [Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) |
+
+ ### Global Explainers (Model-Level)
+
+ | Method | Type | Reference |
+ |--------|------|-----------|
+ | **Permutation Importance** | Feature Importance | [Breiman, 2001](https://link.springer.com/article/10.1023/A:1010933404324) |
+ | **Partial Dependence (PDP)** | Feature Effect | [Friedman, 2001](https://projecteuclid.org/euclid.aos/1013203451) |
+ | **ALE** | Feature Effect | [Apley & Zhu, 2020](https://academic.oup.com/jrsssb/article/82/4/1059/7056085) |
+ | **SAGE** | Shapley Importance | [Covert et al., 2020](https://arxiv.org/abs/2004.00668) |
+
+ ---
+
+ ## Evaluation Metrics
+
+ Explainiverse includes a comprehensive suite of evaluation metrics based on the XAI literature:
+
+ ### Faithfulness Metrics
+
+ | Metric | Description | Reference |
+ |--------|-------------|-----------|
+ | **PGI** | Prediction Gap on Important features | [Petsiuk et al., 2018](https://arxiv.org/abs/1806.07421) |
+ | **PGU** | Prediction Gap on Unimportant features | [Petsiuk et al., 2018](https://arxiv.org/abs/1806.07421) |
+ | **Comprehensiveness** | Drop when removing top-k features | [DeYoung et al., 2020](https://arxiv.org/abs/1911.03429) |
+ | **Sufficiency** | Prediction using only top-k features | [DeYoung et al., 2020](https://arxiv.org/abs/1911.03429) |
+ | **Faithfulness Correlation** | Correlation between attribution and impact | [Bhatt et al., 2020](https://arxiv.org/abs/2005.00631) |
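+
+ All five metrics share one perturbation recipe: mask or perturb a subset of features chosen by the attribution ranking and measure how far the model's prediction moves. A minimal sketch of the idea behind PGI, assuming a `score_fn` that maps a batch of rows to one scalar score per row (the helper name and the mean-value masking here are illustrative, not the library's internals):
+
+ ```python
+ import numpy as np
+
+ def prediction_gap_sketch(score_fn, x, attributions, background, top_k=3):
+     """Toy PGI: replace the top-k attributed features with their
+     background means and measure the resulting shift in the score."""
+     ranked = np.argsort(np.abs(attributions))[::-1]  # most important first
+     x_masked = x.copy()
+     x_masked[ranked[:top_k]] = background.mean(axis=0)[ranked[:top_k]]
+     return float(abs(score_fn(x[None])[0] - score_fn(x_masked[None])[0]))
+ ```
+
+ Masking the *bottom* of the ranking instead turns the same recipe into PGU; the `compute_pgi` and `compute_pgu` calls shown later expose the library's versions.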
+
+ ### Stability Metrics
+
+ | Metric | Description | Reference |
+ |--------|-------------|-----------|
+ | **RIS** | Relative Input Stability | [Agarwal et al., 2022](https://arxiv.org/abs/2203.06877) |
+ | **ROS** | Relative Output Stability | [Agarwal et al., 2022](https://arxiv.org/abs/2203.06877) |
+ | **Lipschitz Estimate** | Local Lipschitz continuity | [Alvarez-Melis & Jaakkola, 2018](https://arxiv.org/abs/1806.08049) |
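+
+ RIS and ROS both take the worst case, over small random perturbations, of how much the explanation changes relative to how much the input (RIS) or the model output (ROS) changes. A sketch of the RIS idea, assuming an `explain_fn` that maps an instance to its attribution vector (names illustrative):
+
+ ```python
+ import numpy as np
+
+ def ris_sketch(explain_fn, x, n_perturbations=10, scale=0.1, eps=1e-8):
+     """Toy Relative Input Stability: max ratio of relative explanation
+     change to relative input change over random Gaussian perturbations."""
+     base = explain_fn(x)
+     worst = 0.0
+     for _ in range(n_perturbations):
+         x_p = x + np.random.normal(0.0, scale, size=x.shape)
+         rel_expl = np.linalg.norm(explain_fn(x_p) - base) / (np.linalg.norm(base) + eps)
+         rel_input = np.linalg.norm(x_p - x) / (np.linalg.norm(x) + eps)
+         worst = max(worst, rel_expl / (rel_input + eps))
+     return worst
+ ```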
+
+ ---
+
+ ## Installation
+
+ ```bash
+ # From PyPI
+ pip install explainiverse
+
+ # With PyTorch support (for gradient-based methods)
+ pip install explainiverse[torch]
+
+ # For development
+ git clone https://github.com/jemsbhai/explainiverse.git
+ cd explainiverse
+ poetry install
+ ```
+
+ ---
+
+ ## Quick Start
+
+ ### Basic Usage with Registry
+
+ ```python
+ from explainiverse import default_registry, SklearnAdapter
+ from sklearn.ensemble import RandomForestClassifier
+ from sklearn.datasets import load_iris
+
+ # Train a model
+ iris = load_iris()
+ model = RandomForestClassifier(n_estimators=100, random_state=42)
+ model.fit(iris.data, iris.target)
+
+ # Wrap with adapter
+ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
+
+ # List all available explainers
+ print(default_registry.list_explainers())
+ # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'deeplift', 'deepshap',
+ #  'smoothgrad', 'saliency', 'gradcam', 'anchors', 'counterfactual', 'protodash',
+ #  'permutation_importance', 'partial_dependence', 'ale', 'sage']
+
+ # Create an explainer via registry
+ explainer = default_registry.create(
+     "lime",
+     model=adapter,
+     training_data=iris.data,
+     feature_names=iris.feature_names,  # already a plain Python list
+     class_names=iris.target_names.tolist()
+ )
+
+ # Generate explanation
+ explanation = explainer.explain(iris.data[0])
+ print(explanation.explanation_data["feature_attributions"])
+ ```
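+
+ Every method returns the same `Explanation` container: who produced it, which class it targets, and a method-specific payload. A quick way to see what a given explainer returned (a sketch, assuming the constructor arguments shown in the custom-explainer section below are exposed 1:1 as attributes):
+
+ ```python
+ print(explanation.explainer_name)           # producing method
+ print(explanation.target_class)             # class the attributions refer to
+ print(explanation.explanation_data.keys())  # method-specific payload
+ ```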
+
+ ### Filter and Recommend Explainers
+
+ ```python
+ # Filter by criteria
+ local_explainers = default_registry.filter(scope="local", data_type="tabular")
+ neural_explainers = default_registry.filter(model_type="neural")
+ image_explainers = default_registry.filter(data_type="image")
+
+ # Get recommendations
+ recommendations = default_registry.recommend(
+     model_type="neural",
+     data_type="tabular",
+     scope_preference="local",
+     max_results=5
+ )
+ ```
+
+ ---
+
+ ## Gradient-Based Explainers (PyTorch)
+
+ ### Integrated Gradients
+
+ ```python
+ import numpy as np
+ import torch.nn as nn
+
+ from explainiverse import PyTorchAdapter
+ from explainiverse.explainers.gradient import IntegratedGradientsExplainer
+
+ # Define and wrap model
+ model = nn.Sequential(
+     nn.Linear(10, 64), nn.ReLU(),
+     nn.Linear(64, 32), nn.ReLU(),
+     nn.Linear(32, 3)
+ )
+ adapter = PyTorchAdapter(model, task="classification", class_names=["A", "B", "C"])
+
+ # Example inputs (10 features, matching the first layer)
+ X = np.random.rand(16, 10).astype(np.float32)
+
+ # Create explainer
+ explainer = IntegratedGradientsExplainer(
+     model=adapter,
+     feature_names=[f"feature_{i}" for i in range(10)],
+     class_names=["A", "B", "C"],
+     n_steps=50,
+     method="riemann_trapezoid"
+ )
+
+ # Explain with convergence check
+ explanation = explainer.explain(X[0], return_convergence_delta=True)
+ print(f"Attributions: {explanation.explanation_data['feature_attributions']}")
+ print(f"Convergence δ: {explanation.explanation_data['convergence_delta']:.6f}")
+ ```
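+
+ `convergence_delta` is the residual of Integrated Gradients' completeness axiom: the attributions should sum to F(x) - F(baseline) for the explained class, so a value near zero means the path integral was resolved with enough steps. A quick guard (the tolerance is a judgment call, not a library default):
+
+ ```python
+ delta = explanation.explanation_data["convergence_delta"]
+ if abs(delta) > 1e-2:  # large residual: the integral is under-resolved
+     print(f"Residual {delta:.4f}; rebuild the explainer with more steps, e.g. n_steps=200")
+ ```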
+
+ ### DeepLIFT and DeepSHAP
+
+ ```python
+ from explainiverse.explainers.gradient import DeepLIFTExplainer, DeepLIFTShapExplainer
+
+ # adapter, feature_names, class_names, X, X_train: as in the previous example
+
+ # DeepLIFT - Fast reference-based attributions
+ deeplift = DeepLIFTExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     baseline=None  # Uses zero baseline by default
+ )
+ explanation = deeplift.explain(X[0])
+
+ # DeepSHAP - DeepLIFT averaged over background samples
+ deepshap = DeepLIFTShapExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     background_data=X_train[:100]
+ )
+ explanation = deepshap.explain(X[0])
+ ```
+
+ ### Saliency Maps
+
+ ```python
+ from explainiverse.explainers.gradient import SaliencyExplainer
+
+ # Saliency Maps - simplest and fastest gradient method
+ explainer = SaliencyExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     absolute_value=True  # Default: absolute gradient magnitudes
+ )
+
+ # Standard saliency (absolute gradients)
+ explanation = explainer.explain(X[0], method="saliency")
+
+ # Input × Gradient (gradient scaled by input values)
+ explanation = explainer.explain(X[0], method="input_times_gradient")
+
+ # Signed saliency (keep gradient direction)
+ explainer_signed = SaliencyExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     absolute_value=False
+ )
+ explanation = explainer_signed.explain(X[0])
+
+ # Compare all variants
+ variants = explainer.compute_all_variants(X[0])
+ print(variants["saliency_absolute"])
+ print(variants["saliency_signed"])
+ print(variants["input_times_gradient"])
+ ```
+
+ ### SmoothGrad
+
+ ```python
+ from explainiverse.explainers.gradient import SmoothGradExplainer
+
+ # SmoothGrad - Noise-averaged gradients for smoother saliency
+ explainer = SmoothGradExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     n_samples=50,
+     noise_scale=0.15,
+     noise_type="gaussian"  # or "uniform"
+ )
+
+ # Standard SmoothGrad
+ explanation = explainer.explain(X[0], method="smoothgrad")
+
+ # SmoothGrad-Squared (sharper attributions)
+ explanation = explainer.explain(X[0], method="smoothgrad_squared")
+
+ # VarGrad (variance of gradients)
+ explanation = explainer.explain(X[0], method="vargrad")
+
+ # With absolute values
+ explanation = explainer.explain(X[0], absolute_value=True)
+ ```
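+
+ Under the hood, SmoothGrad is just saliency averaged over noisy copies of the input; `smoothgrad_squared` averages squared gradients and `vargrad` takes their variance. A minimal numpy sketch, assuming a `grad_fn` that returns the gradient of the target-class score (names illustrative):
+
+ ```python
+ import numpy as np
+
+ def smoothgrad_sketch(grad_fn, x, n_samples=50, noise_scale=0.15, method="smoothgrad"):
+     """Toy SmoothGrad: average gradients over Gaussian-perturbed inputs.
+     Following Smilkov et al., sigma is noise_scale times the input range."""
+     sigma = noise_scale * (x.max() - x.min())
+     grads = np.stack([grad_fn(x + np.random.normal(0.0, sigma, x.shape))
+                       for _ in range(n_samples)])
+     if method == "smoothgrad_squared":
+         return (grads ** 2).mean(axis=0)
+     if method == "vargrad":
+         return grads.var(axis=0)
+     return grads.mean(axis=0)  # plain SmoothGrad
+ ```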
+
+ ### GradCAM for CNNs
+
+ ```python
+ from explainiverse.explainers.gradient import GradCAMExplainer
+
+ # For CNN models
+ adapter = PyTorchAdapter(cnn_model, task="classification", class_names=class_names)
+
+ explainer = GradCAMExplainer(
+     model=adapter,
+     target_layer="layer4",  # Last conv layer
+     class_names=class_names,
+     method="gradcam++"  # or "gradcam"
+ )
+
+ explanation = explainer.explain(image)
+ heatmap = explanation.explanation_data["heatmap"]
+ overlay = explainer.get_overlay(original_image, heatmap, alpha=0.5)
+ ```
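+
+ To inspect the result, the heatmap and overlay can be shown side by side, e.g. with matplotlib (a sketch, assuming `heatmap` is a 2-D array and `get_overlay` returns an RGB image as used above):
+
+ ```python
+ import matplotlib.pyplot as plt
+
+ fig, axes = plt.subplots(1, 2, figsize=(8, 4))
+ axes[0].imshow(heatmap, cmap="jet")  # raw class-activation heatmap
+ axes[0].set_title("GradCAM++ heatmap")
+ axes[1].imshow(overlay)              # heatmap blended onto the input image
+ axes[1].set_title("Overlay (alpha=0.5)")
+ for ax in axes:
+     ax.axis("off")
+ plt.show()
+ ```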
+
+ ---
+
+ ## Example-Based Explanations
+
+ ### ProtoDash
+
+ ```python
+ from explainiverse.explainers.example_based import ProtoDashExplainer
+
+ explainer = ProtoDashExplainer(
+     model=adapter,
+     training_data=X_train,
+     feature_names=feature_names,
+     n_prototypes=5,
+     kernel="rbf",
+     gamma=0.1
+ )
+
+ explanation = explainer.explain(X_test[0])
+ print(explanation.explanation_data["prototype_indices"])
+ print(explanation.explanation_data["prototype_weights"])
+ ```
+
+ ---
+
+ ## Using the Evaluation Metrics
+
+ ### Faithfulness Evaluation
+
+ ```python
+ from explainiverse.evaluation import (
+     compute_pgi, compute_pgu,
+     compute_comprehensiveness, compute_sufficiency,
+     compute_faithfulness_correlation
+ )
+
+ # attributions: a per-feature attribution vector from any explainer above
+
+ # PGI - Higher is better (important features affect predictions)
+ pgi = compute_pgi(
+     model=adapter,
+     instance=X[0],
+     attributions=attributions,
+     feature_names=feature_names,
+     top_k=3
+ )
+
+ # PGU - Lower is better (unimportant features don't affect predictions)
+ pgu = compute_pgu(
+     model=adapter,
+     instance=X[0],
+     attributions=attributions,
+     feature_names=feature_names,
+     top_k=3
+ )
+
+ # Comprehensiveness - Higher is better
+ comp = compute_comprehensiveness(
+     model=adapter,
+     instance=X[0],
+     attributions=attributions,
+     feature_names=feature_names,
+     top_k_values=[1, 2, 3, 5]
+ )
+
+ # Sufficiency - Lower is better
+ suff = compute_sufficiency(
+     model=adapter,
+     instance=X[0],
+     attributions=attributions,
+     feature_names=feature_names,
+     top_k_values=[1, 2, 3, 5]
+ )
+
+ # Faithfulness Correlation
+ corr = compute_faithfulness_correlation(
+     model=adapter,
+     instance=X[0],
+     attributions=attributions,
+     feature_names=feature_names
+ )
+ ```
+
+ ### Stability Evaluation
+
+ ```python
+ from explainiverse.evaluation import (
+     compute_ris, compute_ros, compute_lipschitz_estimate
+ )
+
+ # RIS - Relative Input Stability (lower is better)
+ ris = compute_ris(
+     explainer=explainer,
+     instance=X[0],
+     n_perturbations=10,
+     perturbation_scale=0.1
+ )
+
+ # ROS - Relative Output Stability (lower is better)
+ ros = compute_ros(
+     model=adapter,
+     explainer=explainer,
+     instance=X[0],
+     n_perturbations=10,
+     perturbation_scale=0.1
+ )
+
+ # Lipschitz Estimate (lower is better)
+ lipschitz = compute_lipschitz_estimate(
+     explainer=explainer,
+     instance=X[0],
+     n_perturbations=20,
+     perturbation_scale=0.1
+ )
+ ```
+
+ ---
+
+ ## Global Explainers
+
+ ```python
+ from explainiverse.explainers import (
+     PermutationImportanceExplainer,
+     PartialDependenceExplainer,
+     ALEExplainer,
+     SAGEExplainer
+ )
+
+ # Permutation Importance
+ perm_imp = PermutationImportanceExplainer(
+     model=adapter,
+     X=X_test,
+     y=y_test,
+     feature_names=feature_names,
+     n_repeats=10
+ )
+ explanation = perm_imp.explain()
+
+ # Partial Dependence Plot
+ pdp = PartialDependenceExplainer(
+     model=adapter,
+     X=X_train,
+     feature_names=feature_names
+ )
+ explanation = pdp.explain(feature="feature_0", grid_resolution=50)
+
+ # ALE (handles correlated features)
+ ale = ALEExplainer(
+     model=adapter,
+     X=X_train,
+     feature_names=feature_names
+ )
+ explanation = ale.explain(feature="feature_0", n_bins=20)
+
+ # SAGE (global Shapley importance)
+ sage = SAGEExplainer(
+     model=adapter,
+     X=X_train,
+     y=y_train,
+     feature_names=feature_names,
+     n_permutations=512
+ )
+ explanation = sage.explain()
+ ```
+
+ ---
+
+ ## Multi-Explainer Comparison
+
+ ```python
+ from explainiverse import ExplanationSuite
+
+ suite = ExplanationSuite(
+     model=adapter,
+     explainer_configs=[
+         ("lime", {"training_data": X_train, "feature_names": feature_names, "class_names": class_names}),
+         ("shap", {"background_data": X_train[:50], "feature_names": feature_names, "class_names": class_names}),
+         ("treeshap", {"feature_names": feature_names, "class_names": class_names}),
+     ]
+ )
+
+ results = suite.run(X_test[0])
+ suite.compare()
+ ```
+
+ ---
+
+ ## Custom Explainer Registration
+
+ ```python
+ from explainiverse import default_registry, ExplainerMeta, BaseExplainer, Explanation
+
+ @default_registry.register_decorator(
+     name="my_explainer",
+     meta=ExplainerMeta(
+         scope="local",
+         model_types=["any"],
+         data_types=["tabular"],
+         task_types=["classification", "regression"],
+         description="My custom explainer",
+         paper_reference="Author et al., 2024",
+         complexity="O(n)",
+         requires_training_data=False,
+         supports_batching=True
+     )
+ )
+ class MyExplainer(BaseExplainer):
+     def __init__(self, model, feature_names, **kwargs):
+         super().__init__(model)
+         self.feature_names = feature_names
+
+     def explain(self, instance, **kwargs):
+         # Your implementation
+         attributions = self._compute_attributions(instance)
+         return Explanation(
+             explainer_name="MyExplainer",
+             target_class="output",
+             explanation_data={"feature_attributions": attributions}
+         )
+ ```
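+
+ Once registered, the custom explainer resolves through the same factory as the built-ins (mirroring the `default_registry.create` call from the Quick Start):
+
+ ```python
+ explainer = default_registry.create(
+     "my_explainer",
+     model=adapter,
+     feature_names=feature_names
+ )
+ explanation = explainer.explain(X_test[0])
+ ```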
+
+ ---
+
+ ## Architecture
+
+ ```
+ explainiverse/
+ ├── core/
+ │   ├── explainer.py          # BaseExplainer abstract class
+ │   ├── explanation.py        # Unified Explanation container
+ │   └── registry.py           # ExplainerRegistry with metadata
+ ├── adapters/
+ │   ├── sklearn_adapter.py
+ │   └── pytorch_adapter.py    # With gradient support
+ ├── explainers/
+ │   ├── attribution/          # LIME, SHAP, TreeSHAP
+ │   ├── gradient/             # IG, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM
+ │   ├── rule_based/           # Anchors
+ │   ├── counterfactual/       # DiCE-style
+ │   ├── global_explainers/    # Permutation, PDP, ALE, SAGE
+ │   └── example_based/        # ProtoDash
+ ├── evaluation/
+ │   ├── faithfulness.py       # PGI, PGU, Comprehensiveness, Sufficiency
+ │   └── stability.py          # RIS, ROS, Lipschitz
+ └── engine/
+     └── suite.py              # Multi-explainer comparison
+ ```
+
+ ---
+
+ ## Running Tests
+
+ ```bash
+ # Run all tests
+ poetry run pytest
+
+ # Run with coverage
+ poetry run pytest --cov=explainiverse --cov-report=html
+
+ # Run a specific test file
+ poetry run pytest tests/test_smoothgrad.py -v
+
+ # Run a specific test class
+ poetry run pytest tests/test_smoothgrad.py::TestSmoothGradBasic -v
+ ```
+
+ ---
+
+ ## Roadmap
+
+ ### Completed ✅
+ - [x] Core framework (BaseExplainer, Explanation, Registry)
+ - [x] Perturbation methods: LIME, KernelSHAP, TreeSHAP
+ - [x] Gradient methods: Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++
+ - [x] Rule-based: Anchors
+ - [x] Counterfactual: DiCE-style
+ - [x] Global: Permutation Importance, PDP, ALE, SAGE
+ - [x] Example-based: ProtoDash
+ - [x] Evaluation: Faithfulness metrics (PGI, PGU, Comprehensiveness, Sufficiency, Correlation)
+ - [x] Evaluation: Stability metrics (RIS, ROS, Lipschitz)
+ - [x] PyTorch adapter with gradient support
+
+ ### In Progress 🚧
+ - [ ] TCAV (Testing with Concept Activation Vectors)
+ - [ ] Layer-wise Relevance Propagation (LRP)
+
+ ### Planned 📋
+ - [ ] Attention-based explanations (for Transformers)
+ - [ ] TensorFlow/Keras adapter
+ - [ ] Interactive visualization dashboard
+ - [ ] Explanation caching and serialization
+ - [ ] Distributed computation support
+
+ ---
+
+ ## Citation
+
+ If you use Explainiverse in your research, please cite:
+
+ ```bibtex
+ @software{explainiverse2025,
+   title   = {Explainiverse: A Unified Framework for Explainable AI},
+   author  = {Syed, Muntaser},
+   year    = {2025},
+   url     = {https://github.com/jemsbhai/explainiverse},
+   version = {0.6.0}
+ }
+ ```
+
+ ---
+
+ ## Contributing
+
+ Contributions are welcome! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
+
+ 1. Fork the repository
+ 2. Create a feature branch (`git checkout -b feature/amazing-feature`)
+ 3. Write tests for your changes
+ 4. Ensure all tests pass (`poetry run pytest`)
+ 5. Commit your changes (`git commit -m 'Add amazing feature'`)
+ 6. Push to the branch (`git push origin feature/amazing-feature`)
+ 7. Open a Pull Request
+
+ ---
+
+ ## License
+
+ MIT License - see [LICENSE](LICENSE) for details.
+
+ ---
+
+ ## Acknowledgments
+
+ Explainiverse builds upon the foundational work of many researchers in the XAI community. We thank the authors of LIME, SHAP, Integrated Gradients, DeepLIFT, GradCAM, Anchors, DiCE, ALE, SAGE, and ProtoDash for their contributions to interpretable machine learning.
+