explainiverse 0.2.0__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {explainiverse-0.2.0 → explainiverse-0.2.2}/PKG-INFO +79 -10
  2. {explainiverse-0.2.0 → explainiverse-0.2.2}/README.md +76 -9
  3. {explainiverse-0.2.0 → explainiverse-0.2.2}/pyproject.toml +5 -1
  4. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/__init__.py +15 -3
  5. explainiverse-0.2.2/src/explainiverse/adapters/__init__.py +19 -0
  6. explainiverse-0.2.2/src/explainiverse/adapters/pytorch_adapter.py +396 -0
  7. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/core/registry.py +18 -0
  8. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/__init__.py +4 -1
  9. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/attribution/__init__.py +2 -1
  10. explainiverse-0.2.2/src/explainiverse/explainers/attribution/treeshap_wrapper.py +434 -0
  11. explainiverse-0.2.0/src/explainiverse/adapters/__init__.py +0 -9
  12. {explainiverse-0.2.0 → explainiverse-0.2.2}/LICENSE +0 -0
  13. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/adapters/base_adapter.py +0 -0
  14. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/adapters/sklearn_adapter.py +0 -0
  15. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/core/__init__.py +0 -0
  16. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/core/explainer.py +0 -0
  17. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/core/explanation.py +0 -0
  18. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/engine/__init__.py +0 -0
  19. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/engine/suite.py +0 -0
  20. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/evaluation/__init__.py +0 -0
  21. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/evaluation/metrics.py +0 -0
  22. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/attribution/lime_wrapper.py +0 -0
  23. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/attribution/shap_wrapper.py +0 -0
  24. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/counterfactual/__init__.py +0 -0
  25. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/counterfactual/dice_wrapper.py +0 -0
  26. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/global_explainers/__init__.py +0 -0
  27. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/global_explainers/ale.py +0 -0
  28. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/global_explainers/partial_dependence.py +0 -0
  29. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/global_explainers/permutation_importance.py +0 -0
  30. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/global_explainers/sage.py +0 -0
  31. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/rule_based/__init__.py +0 -0
  32. {explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/explainers/rule_based/anchors_wrapper.py +0 -0
{explainiverse-0.2.0 → explainiverse-0.2.2}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: explainiverse
- Version: 0.2.0
+ Version: 0.2.2
  Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
  Home-page: https://github.com/jemsbhai/explainiverse
  License: MIT
@@ -17,11 +17,13 @@ Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Provides-Extra: torch
  Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
  Requires-Dist: numpy (>=1.24,<2.0)
  Requires-Dist: scikit-learn (>=1.1,<1.6)
  Requires-Dist: scipy (>=1.10,<2.0)
  Requires-Dist: shap (>=0.48.0,<0.49.0)
+ Requires-Dist: torch (>=2.0) ; extra == "torch"
  Requires-Dist: xgboost (>=1.7,<3.0)
  Project-URL: Repository, https://github.com/jemsbhai/explainiverse
  Description-Content-Type: text/markdown
@@ -29,7 +31,7 @@ Description-Content-Type: text/markdown
  # Explainiverse

  **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
- It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
+ It provides a standardized interface for model-agnostic explainability with 9 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.

  ---

@@ -40,6 +42,7 @@ It provides a standardized interface for model-agnostic explainability with 8 st
  **Local Explainers** (instance-level explanations):
  - **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
  - **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
+ - **TreeSHAP** - Exact SHAP values for tree models, 10x+ faster ([Lundberg et al., 2018](https://arxiv.org/abs/1802.03888))
  - **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
  - **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))

@@ -62,7 +65,7 @@ It provides a standardized interface for model-agnostic explainability with 8 st
  ### 🧪 Standardized Interface
  - Consistent `BaseExplainer` API
  - Unified `Explanation` output format
- - Model adapters for sklearn and more
+ - Model adapters for sklearn and PyTorch

  ---

@@ -74,6 +77,12 @@ From PyPI:
  pip install explainiverse
  ```

+ With PyTorch support (for neural network explanations):
+
+ ```bash
+ pip install explainiverse[torch]
+ ```
+
  For development:

  ```bash
@@ -100,7 +109,7 @@ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())

  # List available explainers
  print(default_registry.list_explainers())
- # ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
+ # ['lime', 'shap', 'treeshap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']

  # Create and use an explainer
  explainer = default_registry.create(
@@ -119,11 +128,11 @@ print(explanation.explanation_data["feature_attributions"])
  ```python
  # Find local explainers for tabular data
  local_tabular = default_registry.filter(scope="local", data_type="tabular")
- print(local_tabular) # ['lime', 'shap', 'anchors', 'counterfactual']
+ print(local_tabular) # ['lime', 'shap', 'treeshap', 'anchors', 'counterfactual']

- # Find global explainers
- global_explainers = default_registry.filter(scope="global")
- print(global_explainers) # ['permutation_importance', 'partial_dependence', 'ale', 'sage']
+ # Find explainers optimized for tree models
+ tree_explainers = default_registry.filter(model_type="tree")
+ print(tree_explainers) # ['treeshap']

  # Get recommendations
  recommendations = default_registry.recommend(
@@ -133,6 +142,64 @@ recommendations = default_registry.recommend(
  )
  ```

+ ### TreeSHAP for Tree Models (10x+ Faster)
+
+ ```python
+ from explainiverse.explainers import TreeShapExplainer
+ from sklearn.ensemble import RandomForestClassifier
+
+ # Train a tree-based model
+ model = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
+
+ # TreeSHAP works directly with the model (no adapter needed)
+ explainer = TreeShapExplainer(
+     model=model,
+     feature_names=feature_names,
+     class_names=class_names
+ )
+
+ # Single instance explanation
+ explanation = explainer.explain(X_test[0])
+ print(explanation.explanation_data["feature_attributions"])
+
+ # Batch explanations (efficient)
+ explanations = explainer.explain_batch(X_test[:10])
+
+ # Feature interactions
+ interactions = explainer.explain_interactions(X_test[0])
+ print(interactions.explanation_data["interaction_matrix"])
+ ```
+
+ ### PyTorch Adapter for Neural Networks
+
+ ```python
+ from explainiverse import PyTorchAdapter
+ import torch.nn as nn
+
+ # Define a PyTorch model
+ model = nn.Sequential(
+     nn.Linear(10, 64),
+     nn.ReLU(),
+     nn.Linear(64, 3)
+ )
+
+ # Wrap with adapter
+ adapter = PyTorchAdapter(
+     model,
+     task="classification",
+     class_names=["cat", "dog", "bird"]
+ )
+
+ # Use with any explainer
+ predictions = adapter.predict(X) # Returns numpy array
+
+ # Get gradients for attribution methods
+ predictions, gradients = adapter.predict_with_gradients(X)
+
+ # Access intermediate layers
+ activations = adapter.get_layer_output(X, layer_name="0")
+ ```
+
  ### Using Specific Explainers

  ```python
@@ -233,12 +300,14 @@ poetry run pytest tests/test_new_explainers.py -v
  ## Roadmap

  - [x] LIME, SHAP (KernelSHAP)
+ - [x] TreeSHAP (optimized for tree models) ✅ NEW
  - [x] Anchors, Counterfactuals
  - [x] Permutation Importance, PDP, ALE, SAGE
  - [x] Explainer Registry with filtering
- - [ ] TreeSHAP (optimized for tree models)
+ - [x] PyTorch Adapter NEW
  - [ ] Integrated Gradients (gradient-based for neural nets)
- - [ ] PyTorch/TensorFlow adapters
+ - [ ] GradCAM for CNNs
+ - [ ] TensorFlow adapter
  - [ ] Interactive visualization dashboard

  ---
{explainiverse-0.2.0 → explainiverse-0.2.2}/README.md

@@ -1,7 +1,7 @@
  # Explainiverse

  **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
- It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
+ It provides a standardized interface for model-agnostic explainability with 9 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.

  ---

@@ -12,6 +12,7 @@ It provides a standardized interface for model-agnostic explainability with 8 st
  **Local Explainers** (instance-level explanations):
  - **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
  - **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
+ - **TreeSHAP** - Exact SHAP values for tree models, 10x+ faster ([Lundberg et al., 2018](https://arxiv.org/abs/1802.03888))
  - **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
  - **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))

@@ -34,7 +35,7 @@ It provides a standardized interface for model-agnostic explainability with 8 st
  ### 🧪 Standardized Interface
  - Consistent `BaseExplainer` API
  - Unified `Explanation` output format
- - Model adapters for sklearn and more
+ - Model adapters for sklearn and PyTorch

  ---

@@ -46,6 +47,12 @@ From PyPI:
  pip install explainiverse
  ```

+ With PyTorch support (for neural network explanations):
+
+ ```bash
+ pip install explainiverse[torch]
+ ```
+
  For development:

  ```bash
@@ -72,7 +79,7 @@ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())

  # List available explainers
  print(default_registry.list_explainers())
- # ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
+ # ['lime', 'shap', 'treeshap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']

  # Create and use an explainer
  explainer = default_registry.create(
@@ -91,11 +98,11 @@ print(explanation.explanation_data["feature_attributions"])
  ```python
  # Find local explainers for tabular data
  local_tabular = default_registry.filter(scope="local", data_type="tabular")
- print(local_tabular) # ['lime', 'shap', 'anchors', 'counterfactual']
+ print(local_tabular) # ['lime', 'shap', 'treeshap', 'anchors', 'counterfactual']

- # Find global explainers
- global_explainers = default_registry.filter(scope="global")
- print(global_explainers) # ['permutation_importance', 'partial_dependence', 'ale', 'sage']
+ # Find explainers optimized for tree models
+ tree_explainers = default_registry.filter(model_type="tree")
+ print(tree_explainers) # ['treeshap']

  # Get recommendations
  recommendations = default_registry.recommend(
@@ -105,6 +112,64 @@ recommendations = default_registry.recommend(
  )
  ```

+ ### TreeSHAP for Tree Models (10x+ Faster)
+
+ ```python
+ from explainiverse.explainers import TreeShapExplainer
+ from sklearn.ensemble import RandomForestClassifier
+
+ # Train a tree-based model
+ model = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
+
+ # TreeSHAP works directly with the model (no adapter needed)
+ explainer = TreeShapExplainer(
+     model=model,
+     feature_names=feature_names,
+     class_names=class_names
+ )
+
+ # Single instance explanation
+ explanation = explainer.explain(X_test[0])
+ print(explanation.explanation_data["feature_attributions"])
+
+ # Batch explanations (efficient)
+ explanations = explainer.explain_batch(X_test[:10])
+
+ # Feature interactions
+ interactions = explainer.explain_interactions(X_test[0])
+ print(interactions.explanation_data["interaction_matrix"])
+ ```
+
+ ### PyTorch Adapter for Neural Networks
+
+ ```python
+ from explainiverse import PyTorchAdapter
+ import torch.nn as nn
+
+ # Define a PyTorch model
+ model = nn.Sequential(
+     nn.Linear(10, 64),
+     nn.ReLU(),
+     nn.Linear(64, 3)
+ )
+
+ # Wrap with adapter
+ adapter = PyTorchAdapter(
+     model,
+     task="classification",
+     class_names=["cat", "dog", "bird"]
+ )
+
+ # Use with any explainer
+ predictions = adapter.predict(X) # Returns numpy array
+
+ # Get gradients for attribution methods
+ predictions, gradients = adapter.predict_with_gradients(X)
+
+ # Access intermediate layers
+ activations = adapter.get_layer_output(X, layer_name="0")
+ ```
+
  ### Using Specific Explainers

  ```python
@@ -205,12 +270,14 @@ poetry run pytest tests/test_new_explainers.py -v
  ## Roadmap

  - [x] LIME, SHAP (KernelSHAP)
+ - [x] TreeSHAP (optimized for tree models) ✅ NEW
  - [x] Anchors, Counterfactuals
  - [x] Permutation Importance, PDP, ALE, SAGE
  - [x] Explainer Registry with filtering
- - [ ] TreeSHAP (optimized for tree models)
+ - [x] PyTorch Adapter NEW
  - [ ] Integrated Gradients (gradient-based for neural nets)
- - [ ] PyTorch/TensorFlow adapters
+ - [ ] GradCAM for CNNs
+ - [ ] TensorFlow adapter
  - [ ] Interactive visualization dashboard

  ---
{explainiverse-0.2.0 → explainiverse-0.2.2}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "explainiverse"
- version = "0.2.0"
+ version = "0.2.2"
  description = "Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more"
  authors = ["Muntaser Syed <jemsbhai@gmail.com>"]
  license = "MIT"
@@ -29,6 +29,10 @@ scikit-learn = ">=1.1,<1.6"
  shap = "^0.48.0"
  scipy = ">=1.10,<2.0"
  xgboost = ">=1.7,<3.0"
+ torch = { version = ">=2.0", optional = true }
+
+ [tool.poetry.extras]
+ torch = ["torch"]

  [tool.poetry.group.dev.dependencies]
  pytest = "^8.0"
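The new `[tool.poetry.extras]` table is what produces the `Provides-Extra: torch` metadata in PKG-INFO above: torch stays optional and is only installed when asked for. A short sketch of the two usual ways to pull the extra in; the `pip` form is the one the README documents, while the Poetry command is simply the stock `--extras` flag rather than anything specific to this package:

```bash
# End users: install from PyPI together with the optional PyTorch dependency
pip install "explainiverse[torch]"

# Contributors working from a checkout: include the extra in the Poetry environment
poetry install --extras torch
```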
{explainiverse-0.2.0 → explainiverse-0.2.2}/src/explainiverse/__init__.py

@@ -2,8 +2,9 @@
  """
  Explainiverse - A unified, extensible explainability framework.

- Supports multiple XAI methods including LIME, SHAP, Anchors, Counterfactuals,
- Permutation Importance, PDP, ALE, and SAGE through a consistent interface.
+ Supports multiple XAI methods including LIME, SHAP, TreeSHAP, Anchors,
+ Counterfactuals, Permutation Importance, PDP, ALE, and SAGE through a
+ consistent interface.

  Quick Start:
      from explainiverse import default_registry
@@ -14,6 +15,10 @@ Quick Start:
      # Create an explainer
      explainer = default_registry.create("lime", model=adapter, training_data=X, ...)
      explanation = explainer.explain(instance)
+
+ For PyTorch models:
+     from explainiverse import PyTorchAdapter # Requires torch
+     adapter = PyTorchAdapter(model, task="classification")
  """

  from explainiverse.core.explainer import BaseExplainer
@@ -25,9 +30,10 @@ from explainiverse.core.registry import (
      get_default_registry,
  )
  from explainiverse.adapters.sklearn_adapter import SklearnAdapter
+ from explainiverse.adapters import TORCH_AVAILABLE
  from explainiverse.engine.suite import ExplanationSuite

- __version__ = "0.2.0"
+ __version__ = "0.2.2"

  __all__ = [
      # Core
@@ -40,6 +46,12 @@ __all__ = [
      "get_default_registry",
      # Adapters
      "SklearnAdapter",
+     "TORCH_AVAILABLE",
      # Engine
      "ExplanationSuite",
  ]
+
+ # Conditionally export PyTorchAdapter if torch is available
+ if TORCH_AVAILABLE:
+     from explainiverse.adapters import PyTorchAdapter
+     __all__.append("PyTorchAdapter")
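Because `PyTorchAdapter` is only appended to `__all__` when torch imports cleanly, downstream code can branch on the exported `TORCH_AVAILABLE` flag instead of wrapping its own `try/except ImportError`. A minimal consumer-side sketch under that assumption; `my_torch_model`, `my_sklearn_model`, `my_class_names`, `X_train`, and `instance` are illustrative placeholders, not names from the package:

```python
import explainiverse
from explainiverse import default_registry, SklearnAdapter

if explainiverse.TORCH_AVAILABLE:
    # PyTorchAdapter is only re-exported when torch imported successfully
    from explainiverse import PyTorchAdapter
    adapter = PyTorchAdapter(my_torch_model, task="classification")
else:
    # Fall back to the always-available sklearn adapter
    adapter = SklearnAdapter(my_sklearn_model, class_names=my_class_names)

# Either adapter can then be handed to a registry explainer
# (further keyword arguments elided, as in the package docstring)
explainer = default_registry.create("lime", model=adapter, training_data=X_train)
explanation = explainer.explain(instance)
```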
explainiverse-0.2.2/src/explainiverse/adapters/__init__.py (new file)

@@ -0,0 +1,19 @@
+ # src/explainiverse/adapters/__init__.py
+ """
+ Model adapters - wrappers that provide a consistent interface for different ML frameworks.
+
+ Available adapters:
+ - SklearnAdapter: For scikit-learn models (always available)
+ - PyTorchAdapter: For PyTorch nn.Module models (requires torch)
+ """
+
+ from explainiverse.adapters.base_adapter import BaseModelAdapter
+ from explainiverse.adapters.sklearn_adapter import SklearnAdapter
+
+ # Conditionally import PyTorchAdapter if torch is available
+ try:
+     from explainiverse.adapters.pytorch_adapter import PyTorchAdapter, TORCH_AVAILABLE
+     __all__ = ["BaseModelAdapter", "SklearnAdapter", "PyTorchAdapter", "TORCH_AVAILABLE"]
+ except ImportError:
+     TORCH_AVAILABLE = False
+     __all__ = ["BaseModelAdapter", "SklearnAdapter", "TORCH_AVAILABLE"]