explainiverse 0.5.0.tar.gz → 0.7.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {explainiverse-0.5.0 → explainiverse-0.7.0}/PKG-INFO +97 -11
  2. {explainiverse-0.5.0 → explainiverse-0.7.0}/README.md +96 -10
  3. {explainiverse-0.5.0 → explainiverse-0.7.0}/pyproject.toml +1 -1
  4. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/__init__.py +1 -1
  5. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/core/registry.py +36 -0
  6. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/gradient/__init__.py +14 -0
  7. explainiverse-0.7.0/src/explainiverse/explainers/gradient/saliency.py +293 -0
  8. explainiverse-0.7.0/src/explainiverse/explainers/gradient/tcav.py +865 -0
  9. {explainiverse-0.5.0 → explainiverse-0.7.0}/LICENSE +0 -0
  10. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/adapters/__init__.py +0 -0
  11. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/adapters/base_adapter.py +0 -0
  12. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/adapters/pytorch_adapter.py +0 -0
  13. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/adapters/sklearn_adapter.py +0 -0
  14. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/core/__init__.py +0 -0
  15. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/core/explainer.py +0 -0
  16. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/core/explanation.py +0 -0
  17. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/engine/__init__.py +0 -0
  18. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/engine/suite.py +0 -0
  19. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/evaluation/__init__.py +0 -0
  20. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/evaluation/_utils.py +0 -0
  21. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/evaluation/faithfulness.py +0 -0
  22. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/evaluation/metrics.py +0 -0
  23. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/evaluation/stability.py +0 -0
  24. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/__init__.py +0 -0
  25. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/attribution/__init__.py +0 -0
  26. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/attribution/lime_wrapper.py +0 -0
  27. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/attribution/shap_wrapper.py +0 -0
  28. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/attribution/treeshap_wrapper.py +0 -0
  29. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/counterfactual/__init__.py +0 -0
  30. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/counterfactual/dice_wrapper.py +0 -0
  31. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/example_based/__init__.py +0 -0
  32. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/example_based/protodash.py +0 -0
  33. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/global_explainers/__init__.py +0 -0
  34. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/global_explainers/ale.py +0 -0
  35. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/global_explainers/partial_dependence.py +0 -0
  36. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/global_explainers/permutation_importance.py +0 -0
  37. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/global_explainers/sage.py +0 -0
  38. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/gradient/deeplift.py +0 -0
  39. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/gradient/gradcam.py +0 -0
  40. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/gradient/integrated_gradients.py +0 -0
  41. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/gradient/smoothgrad.py +0 -0
  42. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/rule_based/__init__.py +0 -0
  43. {explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/rule_based/anchors_wrapper.py +0 -0
{explainiverse-0.5.0 → explainiverse-0.7.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: explainiverse
- Version: 0.5.0
+ Version: 0.7.0
  Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
  Home-page: https://github.com/jemsbhai/explainiverse
  License: MIT
@@ -35,7 +35,7 @@ Description-Content-Type: text/markdown
  [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

- **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI). It provides a standardized interface for **15 state-of-the-art explanation methods** across local, global, gradient-based, and example-based paradigms, along with **comprehensive evaluation metrics** for assessing explanation quality.
+ **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI). It provides a standardized interface for **17 state-of-the-art explanation methods** across local, global, gradient-based, concept-based, and example-based paradigms, along with **comprehensive evaluation metrics** for assessing explanation quality.

  ---

@@ -43,7 +43,7 @@ Description-Content-Type: text/markdown

  | Feature | Description |
  |---------|-------------|
- | **15 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM/GradCAM++, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
+ | **17 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++, TCAV, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
  | **8 Evaluation Metrics** | Faithfulness (PGI, PGU, Comprehensiveness, Sufficiency, Correlation) and Stability (RIS, ROS, Lipschitz) |
  | **Unified API** | Consistent `BaseExplainer` interface with standardized `Explanation` output |
  | **Plugin Registry** | Filter explainers by scope, model type, data type; automatic recommendations |
@@ -64,7 +64,9 @@ Description-Content-Type: text/markdown
  | **DeepLIFT** | Gradient | [Shrikumar et al., 2017](https://arxiv.org/abs/1704.02685) |
  | **DeepSHAP** | Gradient + Shapley | [Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874) |
  | **SmoothGrad** | Gradient | [Smilkov et al., 2017](https://arxiv.org/abs/1706.03825) |
+ | **Saliency Maps** | Gradient | [Simonyan et al., 2014](https://arxiv.org/abs/1312.6034) |
  | **GradCAM / GradCAM++** | Gradient (CNN) | [Selvaraju et al., 2017](https://arxiv.org/abs/1610.02391) |
+ | **TCAV** | Concept-Based | [Kim et al., 2018](https://arxiv.org/abs/1711.11279) |
  | **Anchors** | Rule-Based | [Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491) |
  | **Counterfactual** | Contrastive | [Mothilal et al., 2020](https://arxiv.org/abs/1905.07697) |
  | **ProtoDash** | Example-Based | [Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) |
@@ -141,8 +143,8 @@ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
  # List all available explainers
  print(default_registry.list_explainers())
  # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'deeplift', 'deepshap',
- #  'smoothgrad', 'gradcam', 'anchors', 'counterfactual', 'protodash',
- #  'permutation_importance', 'partial_dependence', 'ale', 'sage']
+ #  'smoothgrad', 'saliency', 'gradcam', 'tcav', 'anchors', 'counterfactual',
+ #  'protodash', 'permutation_importance', 'partial_dependence', 'ale', 'sage']

  # Create an explainer via registry
  explainer = default_registry.create(
@@ -233,6 +235,41 @@ deepshap = DeepLIFTShapExplainer(
  explanation = deepshap.explain(X[0])
  ```

+ ### Saliency Maps
+
+ ```python
+ from explainiverse.explainers.gradient import SaliencyExplainer
+
+ # Saliency Maps - simplest and fastest gradient method
+ explainer = SaliencyExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     absolute_value=True  # Default: absolute gradient magnitudes
+ )
+
+ # Standard saliency (absolute gradients)
+ explanation = explainer.explain(X[0], method="saliency")
+
+ # Input × Gradient (gradient scaled by input values)
+ explanation = explainer.explain(X[0], method="input_times_gradient")
+
+ # Signed saliency (keep gradient direction)
+ explainer_signed = SaliencyExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     absolute_value=False
+ )
+ explanation = explainer_signed.explain(X[0])
+
+ # Compare all variants
+ variants = explainer.compute_all_variants(X[0])
+ print(variants["saliency_absolute"])
+ print(variants["saliency_signed"])
+ print(variants["input_times_gradient"])
+ ```
+
  ### SmoothGrad

  ```python
@@ -281,6 +318,56 @@ heatmap = explanation.explanation_data["heatmap"]
  overlay = explainer.get_overlay(original_image, heatmap, alpha=0.5)
  ```

+ ### TCAV (Concept-Based Explanations)
+
+ ```python
+ from explainiverse.explainers.gradient import TCAVExplainer
+
+ # For neural network models with concept examples
+ adapter = PyTorchAdapter(model, task="classification", class_names=class_names)
+
+ # Create TCAV explainer targeting a specific layer
+ explainer = TCAVExplainer(
+     model=adapter,
+     layer_name="layer3",  # Target layer for concept analysis
+     class_names=class_names
+ )
+
+ # Learn a concept from examples (e.g., "striped" pattern)
+ explainer.learn_concept(
+     concept_name="striped",
+     concept_examples=striped_images,   # Images with stripes
+     negative_examples=random_images,   # Random images without stripes
+     min_accuracy=0.6                   # Minimum CAV classifier accuracy
+ )
+
+ # Compute TCAV score: fraction of inputs where concept positively influences prediction
+ tcav_score = explainer.compute_tcav_score(
+     test_inputs=test_images,
+     target_class=0,  # e.g., "zebra"
+     concept_name="striped"
+ )
+ print(f"TCAV score: {tcav_score:.3f}")  # >0.5 means concept positively influences class
+
+ # Statistical significance testing against random concepts
+ result = explainer.statistical_significance_test(
+     test_inputs=test_images,
+     target_class=0,
+     concept_name="striped",
+     n_random=10,
+     negative_examples=random_images
+ )
+ print(f"p-value: {result['p_value']:.4f}, significant: {result['significant']}")
+
+ # Full explanation with multiple concepts
+ explanation = explainer.explain(
+     test_inputs=test_images,
+     target_class=0,
+     run_significance_test=True
+ )
+ print(explanation.explanation_data["tcav_scores"])
+ ```
+
  ---

  ## Example-Based Explanations
@@ -515,7 +602,7 @@ explainiverse/
  │   └── pytorch_adapter.py    # With gradient support
  ├── explainers/
  │   ├── attribution/          # LIME, SHAP, TreeSHAP
- │   ├── gradient/             # IG, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM
+ │   ├── gradient/             # IG, DeepLIFT, DeepSHAP, SmoothGrad, Saliency, GradCAM, TCAV
  │   ├── rule_based/           # Anchors
  │   ├── counterfactual/       # DiCE-style
  │   ├── global_explainers/    # Permutation, PDP, ALE, SAGE
@@ -552,7 +639,8 @@ poetry run pytest tests/test_smoothgrad.py::TestSmoothGradBasic -v
  ### Completed ✅
  - [x] Core framework (BaseExplainer, Explanation, Registry)
  - [x] Perturbation methods: LIME, KernelSHAP, TreeSHAP
- - [x] Gradient methods: Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM/GradCAM++
+ - [x] Gradient methods: Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++
+ - [x] Concept-based: TCAV (Testing with Concept Activation Vectors)
  - [x] Rule-based: Anchors
  - [x] Counterfactual: DiCE-style
  - [x] Global: Permutation Importance, PDP, ALE, SAGE
@@ -562,8 +650,6 @@ poetry run pytest tests/test_smoothgrad.py::TestSmoothGradBasic -v
  - [x] PyTorch adapter with gradient support

  ### In Progress 🚧
- - [ ] Saliency Maps (vanilla gradients)
- - [ ] TCAV (Testing with Concept Activation Vectors)
  - [ ] Layer-wise Relevance Propagation (LRP)

  ### Planned 📋
@@ -585,7 +671,7 @@ If you use Explainiverse in your research, please cite:
    author = {Syed, Muntaser},
    year = {2025},
    url = {https://github.com/jemsbhai/explainiverse},
-   version = {0.5.0}
+   version = {0.7.0}
  }
  ```

@@ -613,5 +699,5 @@ MIT License - see [LICENSE](LICENSE) for details.

  ## Acknowledgments

- Explainiverse builds upon the foundational work of many researchers in the XAI community. We thank the authors of LIME, SHAP, Integrated Gradients, DeepLIFT, GradCAM, Anchors, DiCE, ALE, SAGE, and ProtoDash for their contributions to interpretable machine learning.
+ Explainiverse builds upon the foundational work of many researchers in the XAI community. We thank the authors of LIME, SHAP, Integrated Gradients, DeepLIFT, GradCAM, TCAV, Anchors, DiCE, ALE, SAGE, and ProtoDash for their contributions to interpretable machine learning.

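Background on the headline addition documented above: vanilla saliency (Simonyan et al., 2014) is simply the gradient of the target class score with respect to the input, optionally taken in absolute value. A minimal sketch in plain PyTorch, illustrative only and not the package's `SaliencyExplainer` internals; `model`, `x`, and `target_class` are placeholders:

```python
import torch

def vanilla_saliency(model, x, target_class, absolute_value=True):
    """Gradient of the target class score w.r.t. the input (Simonyan et al., 2014)."""
    model.eval()
    x = x.clone().detach().requires_grad_(True)
    scores = model(x.unsqueeze(0))      # assumes model takes a batch: (1, n_classes)
    scores[0, target_class].backward()  # populates x.grad
    grad = x.grad.detach()
    return grad.abs() if absolute_value else grad

def input_times_gradient(model, x, target_class):
    """Input × Gradient variant: signed gradient scaled elementwise by the input."""
    return x * vanilla_saliency(model, x, target_class, absolute_value=False)
```

The `absolute_value=True` default in the README keeps magnitudes only, matching the original paper's visualization convention; the signed variant preserves whether increasing a feature raises or lowers the class score.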
{explainiverse-0.5.0 → explainiverse-0.7.0}/README.md

@@ -4,7 +4,7 @@
  [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

- **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI). It provides a standardized interface for **15 state-of-the-art explanation methods** across local, global, gradient-based, and example-based paradigms, along with **comprehensive evaluation metrics** for assessing explanation quality.
+ **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI). It provides a standardized interface for **17 state-of-the-art explanation methods** across local, global, gradient-based, concept-based, and example-based paradigms, along with **comprehensive evaluation metrics** for assessing explanation quality.

  ---

@@ -12,7 +12,7 @@

  | Feature | Description |
  |---------|-------------|
- | **15 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM/GradCAM++, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
+ | **17 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++, TCAV, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
  | **8 Evaluation Metrics** | Faithfulness (PGI, PGU, Comprehensiveness, Sufficiency, Correlation) and Stability (RIS, ROS, Lipschitz) |
  | **Unified API** | Consistent `BaseExplainer` interface with standardized `Explanation` output |
  | **Plugin Registry** | Filter explainers by scope, model type, data type; automatic recommendations |
@@ -33,7 +33,9 @@
  | **DeepLIFT** | Gradient | [Shrikumar et al., 2017](https://arxiv.org/abs/1704.02685) |
  | **DeepSHAP** | Gradient + Shapley | [Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874) |
  | **SmoothGrad** | Gradient | [Smilkov et al., 2017](https://arxiv.org/abs/1706.03825) |
+ | **Saliency Maps** | Gradient | [Simonyan et al., 2014](https://arxiv.org/abs/1312.6034) |
  | **GradCAM / GradCAM++** | Gradient (CNN) | [Selvaraju et al., 2017](https://arxiv.org/abs/1610.02391) |
+ | **TCAV** | Concept-Based | [Kim et al., 2018](https://arxiv.org/abs/1711.11279) |
  | **Anchors** | Rule-Based | [Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491) |
  | **Counterfactual** | Contrastive | [Mothilal et al., 2020](https://arxiv.org/abs/1905.07697) |
  | **ProtoDash** | Example-Based | [Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) |
@@ -110,8 +112,8 @@ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
  # List all available explainers
  print(default_registry.list_explainers())
  # ['lime', 'shap', 'treeshap', 'integrated_gradients', 'deeplift', 'deepshap',
- #  'smoothgrad', 'gradcam', 'anchors', 'counterfactual', 'protodash',
- #  'permutation_importance', 'partial_dependence', 'ale', 'sage']
+ #  'smoothgrad', 'saliency', 'gradcam', 'tcav', 'anchors', 'counterfactual',
+ #  'protodash', 'permutation_importance', 'partial_dependence', 'ale', 'sage']

  # Create an explainer via registry
  explainer = default_registry.create(
@@ -202,6 +204,41 @@ deepshap = DeepLIFTShapExplainer(
  explanation = deepshap.explain(X[0])
  ```

+ ### Saliency Maps
+
+ ```python
+ from explainiverse.explainers.gradient import SaliencyExplainer
+
+ # Saliency Maps - simplest and fastest gradient method
+ explainer = SaliencyExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     absolute_value=True  # Default: absolute gradient magnitudes
+ )
+
+ # Standard saliency (absolute gradients)
+ explanation = explainer.explain(X[0], method="saliency")
+
+ # Input × Gradient (gradient scaled by input values)
+ explanation = explainer.explain(X[0], method="input_times_gradient")
+
+ # Signed saliency (keep gradient direction)
+ explainer_signed = SaliencyExplainer(
+     model=adapter,
+     feature_names=feature_names,
+     class_names=class_names,
+     absolute_value=False
+ )
+ explanation = explainer_signed.explain(X[0])
+
+ # Compare all variants
+ variants = explainer.compute_all_variants(X[0])
+ print(variants["saliency_absolute"])
+ print(variants["saliency_signed"])
+ print(variants["input_times_gradient"])
+ ```
+
  ### SmoothGrad

  ```python
@@ -250,6 +287,56 @@ heatmap = explanation.explanation_data["heatmap"]
  overlay = explainer.get_overlay(original_image, heatmap, alpha=0.5)
  ```

+ ### TCAV (Concept-Based Explanations)
+
+ ```python
+ from explainiverse.explainers.gradient import TCAVExplainer
+
+ # For neural network models with concept examples
+ adapter = PyTorchAdapter(model, task="classification", class_names=class_names)
+
+ # Create TCAV explainer targeting a specific layer
+ explainer = TCAVExplainer(
+     model=adapter,
+     layer_name="layer3",  # Target layer for concept analysis
+     class_names=class_names
+ )
+
+ # Learn a concept from examples (e.g., "striped" pattern)
+ explainer.learn_concept(
+     concept_name="striped",
+     concept_examples=striped_images,   # Images with stripes
+     negative_examples=random_images,   # Random images without stripes
+     min_accuracy=0.6                   # Minimum CAV classifier accuracy
+ )
+
+ # Compute TCAV score: fraction of inputs where concept positively influences prediction
+ tcav_score = explainer.compute_tcav_score(
+     test_inputs=test_images,
+     target_class=0,  # e.g., "zebra"
+     concept_name="striped"
+ )
+ print(f"TCAV score: {tcav_score:.3f}")  # >0.5 means concept positively influences class
+
+ # Statistical significance testing against random concepts
+ result = explainer.statistical_significance_test(
+     test_inputs=test_images,
+     target_class=0,
+     concept_name="striped",
+     n_random=10,
+     negative_examples=random_images
+ )
+ print(f"p-value: {result['p_value']:.4f}, significant: {result['significant']}")
+
+ # Full explanation with multiple concepts
+ explanation = explainer.explain(
+     test_inputs=test_images,
+     target_class=0,
+     run_significance_test=True
+ )
+ print(explanation.explanation_data["tcav_scores"])
+ ```
+
  ---

  ## Example-Based Explanations
@@ -484,7 +571,7 @@ explainiverse/
  │   └── pytorch_adapter.py    # With gradient support
  ├── explainers/
  │   ├── attribution/          # LIME, SHAP, TreeSHAP
- │   ├── gradient/             # IG, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM
+ │   ├── gradient/             # IG, DeepLIFT, DeepSHAP, SmoothGrad, Saliency, GradCAM, TCAV
  │   ├── rule_based/           # Anchors
  │   ├── counterfactual/       # DiCE-style
  │   ├── global_explainers/    # Permutation, PDP, ALE, SAGE
@@ -521,7 +608,8 @@ poetry run pytest tests/test_smoothgrad.py::TestSmoothGradBasic -v
  ### Completed ✅
  - [x] Core framework (BaseExplainer, Explanation, Registry)
  - [x] Perturbation methods: LIME, KernelSHAP, TreeSHAP
- - [x] Gradient methods: Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, GradCAM/GradCAM++
+ - [x] Gradient methods: Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++
+ - [x] Concept-based: TCAV (Testing with Concept Activation Vectors)
  - [x] Rule-based: Anchors
  - [x] Counterfactual: DiCE-style
  - [x] Global: Permutation Importance, PDP, ALE, SAGE
@@ -531,8 +619,6 @@ poetry run pytest tests/test_smoothgrad.py::TestSmoothGradBasic -v
  - [x] PyTorch adapter with gradient support

  ### In Progress 🚧
- - [ ] Saliency Maps (vanilla gradients)
- - [ ] TCAV (Testing with Concept Activation Vectors)
  - [ ] Layer-wise Relevance Propagation (LRP)

  ### Planned 📋
@@ -554,7 +640,7 @@ If you use Explainiverse in your research, please cite:
    author = {Syed, Muntaser},
    year = {2025},
    url = {https://github.com/jemsbhai/explainiverse},
-   version = {0.5.0}
+   version = {0.7.0}
  }
  ```

@@ -582,4 +668,4 @@ MIT License - see [LICENSE](LICENSE) for details.

  ## Acknowledgments

- Explainiverse builds upon the foundational work of many researchers in the XAI community. We thank the authors of LIME, SHAP, Integrated Gradients, DeepLIFT, GradCAM, Anchors, DiCE, ALE, SAGE, and ProtoDash for their contributions to interpretable machine learning.
+ Explainiverse builds upon the foundational work of many researchers in the XAI community. We thank the authors of LIME, SHAP, Integrated Gradients, DeepLIFT, GradCAM, TCAV, Anchors, DiCE, ALE, SAGE, and ProtoDash for their contributions to interpretable machine learning.
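The TCAV example above describes the score as the fraction of inputs where the concept positively influences the prediction. In Kim et al., 2018 that is made precise as the fraction of test inputs with a positive directional derivative of the target-class logit along the concept activation vector (CAV) at the chosen layer. A sketch of that computation; `layer_activations` and `logits_from_activations` are hypothetical helpers standing in for a model split at the hooked layer, not `TCAVExplainer` internals:

```python
import torch

def tcav_score(layer_activations, logits_from_activations, cav, test_inputs, target_class):
    """Fraction of inputs whose target-class logit has a positive directional
    derivative along the CAV (Kim et al., 2018)."""
    positives = 0
    for x in test_inputs:
        # Activations at the concept layer, treated as a leaf tensor so we can
        # differentiate the logit with respect to them.
        acts = layer_activations(x.unsqueeze(0)).detach().requires_grad_(True)
        logit = logits_from_activations(acts)[0, target_class]
        (grad,) = torch.autograd.grad(logit, acts)
        # Directional derivative along the concept direction; only its sign matters.
        if torch.dot(grad.flatten(), cav.flatten()) > 0:
            positives += 1
    return positives / len(test_inputs)
```

A score near 1.0 means the concept direction almost always pushes the logit up; the significance test shown in the README guards against directions that random "concepts" would match just as well.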
{explainiverse-0.5.0 → explainiverse-0.7.0}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "explainiverse"
- version = "0.5.0"
+ version = "0.7.0"
  description = "Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more"
  authors = ["Muntaser Syed <jemsbhai@gmail.com>"]
  license = "MIT"
{explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/__init__.py

@@ -33,7 +33,7 @@ from explainiverse.adapters.sklearn_adapter import SklearnAdapter
  from explainiverse.adapters import TORCH_AVAILABLE
  from explainiverse.engine.suite import ExplanationSuite

- __version__ = "0.5.0"
+ __version__ = "0.7.0"

  __all__ = [
      # Core
{explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/core/registry.py

@@ -373,6 +373,8 @@ def _create_default_registry() -> ExplainerRegistry:
      from explainiverse.explainers.gradient.gradcam import GradCAMExplainer
      from explainiverse.explainers.gradient.deeplift import DeepLIFTExplainer, DeepLIFTShapExplainer
      from explainiverse.explainers.gradient.smoothgrad import SmoothGradExplainer
+     from explainiverse.explainers.gradient.saliency import SaliencyExplainer
+     from explainiverse.explainers.gradient.tcav import TCAVExplainer
      from explainiverse.explainers.example_based.protodash import ProtoDashExplainer

      registry = ExplainerRegistry()
@@ -551,6 +553,40 @@ def _create_default_registry() -> ExplainerRegistry:
          )
      )

+     # Register Saliency Maps (for neural networks)
+     registry.register(
+         name="saliency",
+         explainer_class=SaliencyExplainer,
+         meta=ExplainerMeta(
+             scope="local",
+             model_types=["neural"],
+             data_types=["tabular", "image"],
+             task_types=["classification", "regression"],
+             description="Saliency Maps - gradient-based feature attribution (requires PyTorch)",
+             paper_reference="Simonyan et al., 2014 - 'Deep Inside Convolutional Networks' (ICLR Workshop)",
+             complexity="O(forward_pass + backward_pass)",
+             requires_training_data=False,
+             supports_batching=True
+         )
+     )
+
+     # Register TCAV (Concept-based explanations for neural networks)
+     registry.register(
+         name="tcav",
+         explainer_class=TCAVExplainer,
+         meta=ExplainerMeta(
+             scope="local",
+             model_types=["neural"],
+             data_types=["tabular", "image"],
+             task_types=["classification"],
+             description="TCAV - Testing with Concept Activation Vectors for concept-based explanations (requires PyTorch)",
+             paper_reference="Kim et al., 2018 - 'Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors' (ICML)",
+             complexity="O(n_concepts * n_test_inputs * forward_pass)",
+             requires_training_data=True,
+             supports_batching=True
+         )
+     )
+
      # =========================================================================
      # Global Explainers (model-level)
      # =========================================================================
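With these registrations, both new explainers become creatable by name through the default registry, consistent with the listing shown in the README diff. A usage sketch; the import path is inferred from this file, and the `create()` keyword arguments are assumed to be forwarded to the explainer constructor, which the diff does not confirm:

```python
from explainiverse.core.registry import default_registry  # path inferred from this diff

names = default_registry.list_explainers()
assert "saliency" in names and "tcav" in names  # new in 0.7.0

# Keyword arguments mirror the SaliencyExplainer constructor from the README;
# adapter, feature_names, and class_names are placeholders from earlier examples.
saliency = default_registry.create(
    "saliency",
    model=adapter,
    feature_names=feature_names,
    class_names=class_names,
)
```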
{explainiverse-0.5.0 → explainiverse-0.7.0}/src/explainiverse/explainers/gradient/__init__.py

@@ -4,12 +4,23 @@ Gradient-based explainers for neural networks.

  These explainers require models that support gradient computation,
  typically via the PyTorchAdapter.
+
+ Explainers:
+ - IntegratedGradientsExplainer: Axiomatic attributions via path integration
+ - GradCAMExplainer: Visual explanations for CNNs
+ - DeepLIFTExplainer: Reference-based attribution
+ - DeepLIFTShapExplainer: DeepLIFT + SHAP combination
+ - SmoothGradExplainer: Noise-averaged gradients
+ - SaliencyExplainer: Basic gradient attribution
+ - TCAVExplainer: Concept-based explanations (TCAV)
  """

  from explainiverse.explainers.gradient.integrated_gradients import IntegratedGradientsExplainer
  from explainiverse.explainers.gradient.gradcam import GradCAMExplainer
  from explainiverse.explainers.gradient.deeplift import DeepLIFTExplainer, DeepLIFTShapExplainer
  from explainiverse.explainers.gradient.smoothgrad import SmoothGradExplainer
+ from explainiverse.explainers.gradient.saliency import SaliencyExplainer
+ from explainiverse.explainers.gradient.tcav import TCAVExplainer, ConceptActivationVector

  __all__ = [
      "IntegratedGradientsExplainer",
@@ -17,4 +28,7 @@ __all__ = [
      "DeepLIFTExplainer",
      "DeepLIFTShapExplainer",
      "SmoothGradExplainer",
+     "SaliencyExplainer",
+     "TCAVExplainer",
+     "ConceptActivationVector",
  ]
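The newly exported `ConceptActivationVector` reflects how CAVs are built in the TCAV paper: a linear classifier is trained to separate concept activations from negative activations at the chosen layer, the CAV is its (normalized) weight vector, and `min_accuracy` rejects concepts the probe cannot separate. A scikit-learn sketch of that recipe; this illustrates the paper's procedure, not the package's `learn_concept` implementation:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

def learn_cav(concept_acts, negative_acts, min_accuracy=0.6):
    """Train a linear probe on layer activations; the CAV is its weight vector."""
    X = np.vstack([concept_acts, negative_acts])
    y = np.concatenate([np.ones(len(concept_acts)), np.zeros(len(negative_acts))])
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.33, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    acc = clf.score(X_te, y_te)
    if acc < min_accuracy:  # mirrors the min_accuracy gate in learn_concept
        raise ValueError(f"CAV probe accuracy {acc:.2f} below threshold {min_accuracy}")
    cav = clf.coef_[0]
    return cav / np.linalg.norm(cav)  # unit-normalize the concept direction
```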