explainiverse 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- explainiverse/__init__.py +1 -1
- explainiverse/core/registry.py +40 -0
- explainiverse/explainers/__init__.py +8 -0
- explainiverse/explainers/example_based/__init__.py +18 -0
- explainiverse/explainers/example_based/protodash.py +826 -0
- explainiverse/explainers/gradient/__init__.py +2 -0
- explainiverse/explainers/gradient/smoothgrad.py +424 -0
- explainiverse-0.5.0.dist-info/METADATA +617 -0
- {explainiverse-0.3.0.dist-info → explainiverse-0.5.0.dist-info}/RECORD +11 -8
- explainiverse-0.3.0.dist-info/METADATA +0 -391
- {explainiverse-0.3.0.dist-info → explainiverse-0.5.0.dist-info}/LICENSE +0 -0
- {explainiverse-0.3.0.dist-info → explainiverse-0.5.0.dist-info}/WHEEL +0 -0
explainiverse/__init__.py
CHANGED
explainiverse/core/registry.py
CHANGED
@@ -372,6 +372,8 @@ def _create_default_registry() -> ExplainerRegistry:
     from explainiverse.explainers.gradient.integrated_gradients import IntegratedGradientsExplainer
     from explainiverse.explainers.gradient.gradcam import GradCAMExplainer
     from explainiverse.explainers.gradient.deeplift import DeepLIFTExplainer, DeepLIFTShapExplainer
+    from explainiverse.explainers.gradient.smoothgrad import SmoothGradExplainer
+    from explainiverse.explainers.example_based.protodash import ProtoDashExplainer
 
     registry = ExplainerRegistry()
 
@@ -532,6 +534,23 @@ def _create_default_registry() -> ExplainerRegistry:
         )
     )
 
+    # Register SmoothGrad (for neural networks)
+    registry.register(
+        name="smoothgrad",
+        explainer_class=SmoothGradExplainer,
+        meta=ExplainerMeta(
+            scope="local",
+            model_types=["neural"],
+            data_types=["tabular", "image"],
+            task_types=["classification", "regression"],
+            description="SmoothGrad - noise-averaged gradients for smoother saliency maps (requires PyTorch)",
+            paper_reference="Smilkov et al., 2017 - 'SmoothGrad: removing noise by adding noise' (ICML Workshop)",
+            complexity="O(n_samples * forward_pass)",
+            requires_training_data=False,
+            supports_batching=True
+        )
+    )
+
     # =========================================================================
     # Global Explainers (model-level)
     # =========================================================================
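For orientation, here is a minimal sketch of the idea behind the newly registered SmoothGrad explainer (Smilkov et al., 2017): average the input gradient over several noise-perturbed copies of the input. This is an illustrative assumption of how such an explainer works, not the package's SmoothGradExplainer implementation; the function name and the n_samples and noise_scale defaults below are hypothetical.

import torch

def smoothgrad_saliency(model, x, target_class, n_samples=25, noise_scale=0.1):
    """Noise-averaged gradient of the target logit with respect to input x (illustrative sketch)."""
    grads = torch.zeros_like(x)
    for _ in range(n_samples):
        # Perturb the input with Gaussian noise and track gradients on the copy.
        noisy = (x.detach() + noise_scale * torch.randn_like(x)).requires_grad_(True)
        # Assumes the model takes a batch and returns (batch, n_classes) scores.
        score = model(noisy.unsqueeze(0))[0, target_class]
        score.backward()
        grads += noisy.grad
    return grads / n_samples

The registered complexity of O(n_samples * forward_pass) follows directly from this loop: each noisy copy costs one forward and one backward pass.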
@@ -604,6 +623,27 @@ def _create_default_registry() -> ExplainerRegistry:
         )
     )
 
+    # =========================================================================
+    # Example-Based Explainers
+    # =========================================================================
+
+    # Register ProtoDash
+    registry.register(
+        name="protodash",
+        explainer_class=ProtoDashExplainer,
+        meta=ExplainerMeta(
+            scope="local",
+            model_types=["any"],
+            data_types=["tabular"],
+            task_types=["classification", "regression"],
+            description="ProtoDash - prototype selection with importance weights for example-based explanations",
+            paper_reference="Gurumoorthy et al., 2019 - 'Efficient Data Representation by Selecting Prototypes' (ICDM)",
+            complexity="O(n_prototypes * n_samples^2)",
+            requires_training_data=True,
+            supports_batching=True
+        )
+    )
+
     return registry
 
 
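Similarly, a simplified sketch of the prototype-selection idea behind the protodash registration (Gurumoorthy et al., 2019): greedily add the candidate with the largest gradient of a kernel-matching objective, then refit nonnegative importance weights over the selected set. The rbf_kernel and protodash helper names, the gamma default, and the NNLS-based weight solve are illustrative assumptions, not the package's ProtoDashExplainer code.

import numpy as np
from scipy.optimize import nnls

def rbf_kernel(A, B, gamma=1.0):
    # Pairwise squared distances mapped to RBF similarities.
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=-1)
    return np.exp(-gamma * d2)

def protodash(candidates, targets, n_prototypes=5, gamma=1.0):
    """Return indices of selected prototypes and their nonnegative importance weights."""
    K = rbf_kernel(candidates, candidates, gamma)              # kernel among candidates
    mu = rbf_kernel(candidates, targets, gamma).mean(axis=1)   # similarity to the target mean
    selected, weights = [], np.zeros(0)
    for _ in range(n_prototypes):
        # Gradient of the objective w^T mu - 0.5 w^T K w at the current weights.
        grad = mu - (K[:, selected] @ weights if selected else 0.0)
        grad[selected] = -np.inf                               # do not re-select chosen prototypes
        selected.append(int(np.argmax(grad)))
        # Refit nonnegative weights over the selected set by rewriting the
        # quadratic objective as an NNLS problem via a Cholesky factor of K_SS.
        K_S = K[np.ix_(selected, selected)] + 1e-8 * np.eye(len(selected))
        L = np.linalg.cholesky(K_S)
        weights, _ = nnls(L.T, np.linalg.solve(L, mu[selected]))
    return selected, weights

For a local explanation, targets can be a single instance reshaped to (1, n_features); the selected training rows then serve as its prototypes, with the weights indicating their importance. The quadratic cost in the number of candidates is consistent with the registered O(n_prototypes * n_samples^2) complexity.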
explainiverse/explainers/__init__.py
CHANGED
@@ -9,12 +9,17 @@ Local Explainers (instance-level):
 - Anchors: High-precision rule-based explanations
 - Counterfactual: Diverse counterfactual explanations
 - Integrated Gradients: Gradient-based attributions for neural networks
+- DeepLIFT: Reference-based attributions for neural networks
+- DeepSHAP: DeepLIFT combined with SHAP for neural networks
 
 Global Explainers (model-level):
 - Permutation Importance: Feature importance via permutation
 - Partial Dependence: Marginal feature effects (PDP)
 - ALE: Accumulated Local Effects (unbiased for correlated features)
 - SAGE: Shapley Additive Global importancE
+
+Example-Based Explainers:
+- ProtoDash: Prototype selection with importance weights
 """
 
 from explainiverse.explainers.attribution.lime_wrapper import LimeExplainer
@@ -29,6 +34,7 @@ from explainiverse.explainers.global_explainers.sage import SAGEExplainer
 from explainiverse.explainers.gradient.integrated_gradients import IntegratedGradientsExplainer
 from explainiverse.explainers.gradient.gradcam import GradCAMExplainer
 from explainiverse.explainers.gradient.deeplift import DeepLIFTExplainer, DeepLIFTShapExplainer
+from explainiverse.explainers.example_based.protodash import ProtoDashExplainer
 
 __all__ = [
     # Local explainers
@@ -46,4 +52,6 @@ __all__ = [
     "PartialDependenceExplainer",
     "ALEExplainer",
     "SAGEExplainer",
+    # Example-based explainers
+    "ProtoDashExplainer",
 ]
explainiverse/explainers/example_based/__init__.py
ADDED
@@ -0,0 +1,18 @@
+# src/explainiverse/explainers/example_based/__init__.py
+"""
+Example-based explanation methods.
+
+These methods explain models by identifying representative examples
+from the training data, rather than computing feature attributions.
+
+Methods:
+- ProtoDash: Select prototypical examples with importance weights
+- (Future) Influence Functions: Identify training examples that most affect predictions
+- (Future) MMD-Critic: Find prototypes and criticisms
+"""
+
+from explainiverse.explainers.example_based.protodash import ProtoDashExplainer
+
+__all__ = [
+    "ProtoDashExplainer",
+]
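Taken together, the wiring above exposes the new explainers through the package's import surface. A hedged sketch based only on the import paths and __all__ entries visible in this diff; constructor signatures are not part of the diff, so no instances are created here.

# Import paths taken verbatim from the hunks above.
from explainiverse.explainers import ProtoDashExplainer                      # re-exported via __all__
from explainiverse.explainers.gradient.smoothgrad import SmoothGradExplainer # direct module import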