explainiverse-0.1.0a1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- explainiverse-0.1.0a1/LICENSE +21 -0
- explainiverse-0.1.0a1/PKG-INFO +83 -0
- explainiverse-0.1.0a1/README.md +61 -0
- explainiverse-0.1.0a1/pyproject.toml +26 -0
- explainiverse-0.1.0a1/src/explainiverse/__init__.py +1 -0
- explainiverse-0.1.0a1/src/explainiverse/adapters/__init__.py +0 -0
- explainiverse-0.1.0a1/src/explainiverse/adapters/base_adapter.py +25 -0
- explainiverse-0.1.0a1/src/explainiverse/adapters/sklearn_adapter.py +32 -0
- explainiverse-0.1.0a1/src/explainiverse/core/__init__.py +0 -0
- explainiverse-0.1.0a1/src/explainiverse/core/explainer.py +31 -0
- explainiverse-0.1.0a1/src/explainiverse/core/explanation.py +24 -0
- explainiverse-0.1.0a1/src/explainiverse/explainers/__init__.py +0 -0
- explainiverse-0.1.0a1/src/explainiverse/explainers/attribution/__init__.py +0 -0
- explainiverse-0.1.0a1/src/explainiverse/explainers/attribution/lime_wrapper.py +63 -0
- explainiverse-0.1.0a1/src/explainiverse/explainers/attribution/shap_wrapper.py +66 -0
explainiverse-0.1.0a1/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Muntaser Syed
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
explainiverse-0.1.0a1/PKG-INFO
@@ -0,0 +1,83 @@
+Metadata-Version: 2.1
+Name: explainiverse
+Version: 0.1.0a1
+Summary: Unified, extensible explainability framework supporting LIME, SHAP, and custom adapters
+Home-page: https://github.com/jemsbhai/explainiverse
+License: MIT
+Author: Muntaser Syed
+Author-email: jemsbhai@gmail.com
+Requires-Python: >=3.10,<3.13
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
+Requires-Dist: numpy (==1.24.4)
+Requires-Dist: scikit-learn (>=1.1,<1.4)
+Requires-Dist: shap (>=0.48.0,<0.49.0)
+Project-URL: Repository, https://github.com/jemsbhai/explainiverse
+Description-Content-Type: text/markdown
+
+# Explainiverse
+
+Explainiverse is a unified, extensible, and testable Python framework for explainable AI (XAI).
+It provides a consistent API and support for post-hoc explainers like LIME and SHAP, model adapters, and rigorous evaluation strategies.
+
+---
+
+## Features
+
+- Standardized Explainer interface (`BaseExplainer`)
+- Support for classification, regression, and multi-class models
+- Integrated explainers:
+  - LIME (Local surrogate models)
+  - SHAP (KernelExplainer with per-class and global support)
+- Adapter layer for scikit-learn models
+- Explanation object with structured output and future extensibility for `.plot()`
+- Full unit test suite covering classification, regression, global/cohort SHAP, and adapter behavior
+
+---
+
+## Installation
+
+This package will soon be available on PyPI.
+
+For development use:
+
+```bash
+git clone https://github.com/YOUR_USERNAME/explainiverse.git
+cd explainiverse
+poetry install
+```
+
+---
+
+## Running Tests
+
+All tests can be run using:
+
+```bash
+poetry run python tests/test_all.py
+```
+
+For individual component testing:
+
+```bash
+poetry run python tests/test_shap_explainer.py
+poetry run python tests/test_lime_explainer.py
+```
+
+---
+
+## Documentation
+
+Documentation is currently in development.
+Until then, test files (especially `test_shap_explainer.py`) demonstrate usage and structure.
+
+---
+
+## License
+
+This project is licensed under the MIT License.
+
explainiverse-0.1.0a1/README.md
@@ -0,0 +1,61 @@
+# Explainiverse
+
+Explainiverse is a unified, extensible, and testable Python framework for explainable AI (XAI).
+It provides a consistent API and support for post-hoc explainers like LIME and SHAP, model adapters, and rigorous evaluation strategies.
+
+---
+
+## Features
+
+- Standardized Explainer interface (`BaseExplainer`)
+- Support for classification, regression, and multi-class models
+- Integrated explainers:
+  - LIME (Local surrogate models)
+  - SHAP (KernelExplainer with per-class and global support)
+- Adapter layer for scikit-learn models
+- Explanation object with structured output and future extensibility for `.plot()`
+- Full unit test suite covering classification, regression, global/cohort SHAP, and adapter behavior
+
+---
+
+## Installation
+
+This package will soon be available on PyPI.
+
+For development use:
+
+```bash
+git clone https://github.com/YOUR_USERNAME/explainiverse.git
+cd explainiverse
+poetry install
+```
+
+---
+
+## Running Tests
+
+All tests can be run using:
+
+```bash
+poetry run python tests/test_all.py
+```
+
+For individual component testing:
+
+```bash
+poetry run python tests/test_shap_explainer.py
+poetry run python tests/test_lime_explainer.py
+```
+
+---
+
+## Documentation
+
+Documentation is currently in development.
+Until then, test files (especially `test_shap_explainer.py`) demonstrate usage and structure.
+
+---
+
+## License
+
+This project is licensed under the MIT License.
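
Since the README defers usage documentation to the test files, here is a minimal end-to-end sketch pieced together from the adapter and wrapper sources later in this diff. The iris dataset and random-forest model are illustrative choices, not taken from the package's own tests:

```python
# Hedged sketch: wiring inferred from sklearn_adapter.py and lime_wrapper.py
# in this diff; dataset/model choices are illustrative.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

from explainiverse.adapters.sklearn_adapter import SklearnAdapter
from explainiverse.explainers.attribution.lime_wrapper import LimeExplainer

data = load_iris()
X, y = data.data, data.target
feature_names = list(data.feature_names)
class_names = list(data.target_names)

# Wrap the fitted model so every explainer sees the same .predict() surface.
adapter = SklearnAdapter(
    RandomForestClassifier(random_state=0).fit(X, y),
    feature_names=feature_names,
    class_names=class_names,
)

explainer = LimeExplainer(
    model=adapter,
    training_data=X,
    feature_names=feature_names,
    class_names=class_names,
)

explanation = explainer.explain(X[0], num_features=4)
# e.g. Explanation(explainer='LIME', target='setosa', keys=['feature_attributions'])
print(explanation)
```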
explainiverse-0.1.0a1/pyproject.toml
@@ -0,0 +1,26 @@
+[tool.poetry]
+name = "explainiverse"
+version = "0.1.0a1"
+description = "Unified, extensible explainability framework supporting LIME, SHAP, and custom adapters"
+authors = ["Muntaser Syed <jemsbhai@gmail.com>"]
+license = "MIT"
+readme = "README.md"
+repository = "https://github.com/jemsbhai/explainiverse"
+homepage = "https://github.com/jemsbhai/explainiverse"
+packages = [{ include = "explainiverse", from = "src" }]
+
+[tool.poetry.dependencies]
+python = ">=3.10,<3.13"
+numpy = "1.24.4"
+lime = "^0.2.0.1"
+scikit-learn = ">=1.1,<1.4"
+shap = "^0.48.0"
+
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[project.urls]
+"Bug Tracker" = "https://github.com/jemsbhai/explainiverse/issues"
+"Documentation" = "https://github.com/jemsbhai/explainiverse#readme"
explainiverse-0.1.0a1/src/explainiverse/__init__.py
@@ -0,0 +1 @@
+# Explainiverse Init

explainiverse-0.1.0a1/src/explainiverse/adapters/__init__.py
File without changes
explainiverse-0.1.0a1/src/explainiverse/adapters/base_adapter.py
@@ -0,0 +1,25 @@
+# src/explainiverse/adapters/base_adapter.py
+
+from abc import ABC, abstractmethod
+
+class BaseModelAdapter(ABC):
+    """
+    Abstract base class for all model adapters.
+    """
+
+    def __init__(self, model, feature_names=None):
+        self.model = model
+        self.feature_names = feature_names
+
+    @abstractmethod
+    def predict(self, data):
+        """
+        Returns prediction probabilities or outputs in a standard format.
+
+        Args:
+            data: Input data (single instance or batch).
+
+        Returns:
+            List or NumPy array of prediction scores.
+        """
+        pass
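
New frameworks plug in by subclassing `BaseModelAdapter` and implementing `predict`. A minimal sketch, assuming a model exposed as a plain callable (the `CallableAdapter` name is hypothetical, not part of this release):

```python
# Hypothetical adapter sketch; CallableAdapter does not ship in this release.
import numpy as np
from explainiverse.adapters.base_adapter import BaseModelAdapter

class CallableAdapter(BaseModelAdapter):
    """Treats any plain callable as the model."""

    def predict(self, data):
        # Delegate to the callable, then coerce to the 2D
        # (n_samples, n_outputs) convention the explainers expect.
        out = np.asarray(self.model(data))
        return out if out.ndim == 2 else out.reshape(-1, 1)

# Usage: any function mapping a 2D batch to scores now fits the framework.
adapter = CallableAdapter(lambda batch: np.asarray(batch).sum(axis=1))
print(adapter.predict(np.eye(3)))  # column vector of row sums
```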
explainiverse-0.1.0a1/src/explainiverse/adapters/sklearn_adapter.py
@@ -0,0 +1,32 @@
+# src/explainiverse/adapters/sklearn_adapter.py
+
+import numpy as np
+from .base_adapter import BaseModelAdapter
+
+class SklearnAdapter(BaseModelAdapter):
+    """
+    Adapter for Scikit-learn classifiers.
+    """
+
+    def __init__(self, model, feature_names=None, class_names=None):
+        super().__init__(model, feature_names)
+        self.class_names = class_names
+
+    def predict(self, data: np.ndarray) -> np.ndarray:
+        """
+        Returns prediction probabilities.
+
+        Args:
+            data: A 2D numpy array of inputs.
+
+        Returns:
+            Array of shape (n_samples, n_classes).
+        """
+        if hasattr(self.model, "predict_proba"):
+            return self.model.predict_proba(data)
+        else:
+            preds = self.model.predict(data)
+            if self.class_names:
+                return np.eye(len(self.class_names))[preds]
+            else:
+                return preds.reshape(-1, 1)  # regression: raw outputs

explainiverse-0.1.0a1/src/explainiverse/core/__init__.py
File without changes
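
The fallback branch in `predict` is worth noting: estimators without `predict_proba` get their class labels one-hot encoded via `np.eye`, and regressors (no `class_names`) return a single raw-output column. A quick illustration, with model choices that are mine rather than the package's:

```python
# Illustrative check of the two fallback paths in SklearnAdapter.predict.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVC
from explainiverse.adapters.sklearn_adapter import SklearnAdapter

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y_cls = np.array([0, 0, 1, 1])
y_reg = np.array([0.0, 1.1, 1.9, 3.2])

# LinearSVC has no predict_proba, so labels are one-hot encoded.
svc = SklearnAdapter(LinearSVC().fit(X, y_cls), class_names=["neg", "pos"])
print(svc.predict(X).shape)  # (4, 2)

# A regressor falls through to the raw-output branch: one column.
reg = SklearnAdapter(LinearRegression().fit(X, y_reg))
print(reg.predict(X).shape)  # (4, 1)
```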
explainiverse-0.1.0a1/src/explainiverse/core/explainer.py
@@ -0,0 +1,31 @@
+# src/explainiverse/core/explainer.py
+
+from abc import ABC, abstractmethod
+
+class BaseExplainer(ABC):
+    """
+    Abstract base class for all explainers in Explainiverse.
+    """
+
+    def __init__(self, model):
+        """
+        Initialize with a model adapter or raw model.
+
+        Args:
+            model: A wrapped ML model with a standardized `predict` method.
+        """
+        self.model = model
+
+    @abstractmethod
+    def explain(self, instance, **kwargs):
+        """
+        Generate an explanation for a single input instance.
+
+        Args:
+            instance: The input to explain (e.g., feature vector, image, text).
+            **kwargs: Optional method-specific parameters.
+
+        Returns:
+            An Explanation object or dict.
+        """
+        pass
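
Any new method joins the framework by subclassing `BaseExplainer` and returning an `Explanation`. A toy occlusion-style sketch, assuming the adapter convention above (this explainer is illustrative and does not ship in this release):

```python
# Illustrative custom explainer; not part of explainiverse 0.1.0a1.
import numpy as np
from explainiverse.core.explainer import BaseExplainer
from explainiverse.core.explanation import Explanation

class OcclusionExplainer(BaseExplainer):
    """Toy explainer: score drop when each feature is zeroed out."""

    def __init__(self, model, feature_names):
        super().__init__(model)
        self.feature_names = feature_names

    def explain(self, instance, **kwargs):
        x = np.asarray(instance, dtype=float).reshape(1, -1)
        base = self.model.predict(x)[0]          # scores for one instance
        target = int(np.argmax(base))            # explain the top output
        attributions = {}
        for i, name in enumerate(self.feature_names):
            occluded = x.copy()
            occluded[0, i] = 0.0
            drop = base[target] - self.model.predict(occluded)[0][target]
            attributions[name] = float(drop)
        return Explanation(
            explainer_name="Occlusion",
            target_class=str(target),
            explanation_data={"feature_attributions": attributions},
        )
```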
explainiverse-0.1.0a1/src/explainiverse/core/explanation.py
@@ -0,0 +1,24 @@
+# src/explainiverse/core/explanation.py
+
+class Explanation:
+    """
+    Unified container for explanation results.
+    """
+
+    def __init__(self, explainer_name: str, target_class: str, explanation_data: dict):
+        self.explainer_name = explainer_name
+        self.target_class = target_class
+        self.explanation_data = explanation_data  # e.g., {'feature_attributions': {...}}
+
+    def __repr__(self):
+        return (f"Explanation(explainer='{self.explainer_name}', "
+                f"target='{self.target_class}', "
+                f"keys={list(self.explanation_data.keys())})")
+
+    def plot(self, type='bar'):
+        """
+        Visualizes the explanation.
+        This will later integrate with a proper visualization backend.
+        """
+        print(f"[plot: {type}] Plotting explanation for {self.target_class} "
+              f"from {self.explainer_name}.")

explainiverse-0.1.0a1/src/explainiverse/explainers/__init__.py
File without changes

explainiverse-0.1.0a1/src/explainiverse/explainers/attribution/__init__.py
File without changes
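
Both wrappers below return this same container, which keeps downstream code explainer-agnostic. Constructing one directly shows the contract (the values here are made up for illustration):

```python
from explainiverse.core.explanation import Explanation

exp = Explanation(
    explainer_name="LIME",
    target_class="versicolor",
    explanation_data={"feature_attributions": {"petal width (cm)": 0.31}},
)
print(exp)   # Explanation(explainer='LIME', target='versicolor', keys=['feature_attributions'])
exp.plot()   # currently a stub that prints a placeholder message
```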
explainiverse-0.1.0a1/src/explainiverse/explainers/attribution/lime_wrapper.py
@@ -0,0 +1,63 @@
+# src/explainiverse/explainers/attribution/lime_wrapper.py
+
+import numpy as np
+from lime.lime_tabular import LimeTabularExplainer
+
+from explainiverse.core.explainer import BaseExplainer
+from explainiverse.core.explanation import Explanation
+
+
+class LimeExplainer(BaseExplainer):
+    """
+    Wrapper for LIME that conforms to the BaseExplainer API.
+    """
+
+    def __init__(self, model, training_data, feature_names, class_names, mode="classification"):
+        """
+        Args:
+            model: A model adapter (implements .predict()).
+            training_data: The data used to initialize LIME (2D np.ndarray).
+            feature_names: List of feature names.
+            class_names: List of class names.
+            mode: 'classification' or 'regression'.
+        """
+        super().__init__(model)
+        self.feature_names = feature_names
+        self.class_names = class_names
+        self.mode = mode
+
+        self.explainer = LimeTabularExplainer(
+            training_data=training_data,
+            feature_names=feature_names,
+            class_names=class_names,
+            mode=mode
+        )
+
+    def explain(self, instance, num_features=5, top_labels=1):
+        """
+        Generate a local explanation for the given instance.
+
+        Args:
+            instance: 1D numpy array (single row)
+            num_features: Number of top features to include
+            top_labels: Number of top labels to explain
+
+        Returns:
+            Explanation object
+        """
+        lime_exp = self.explainer.explain_instance(
+            data_row=instance,
+            predict_fn=self.model.predict,
+            num_features=num_features,
+            top_labels=top_labels
+        )
+
+        label_index = lime_exp.top_labels[0]
+        label_name = self.class_names[label_index]
+        attributions = dict(lime_exp.as_list(label=label_index))
+
+        return Explanation(
+            explainer_name="LIME",
+            target_class=label_name,
+            explanation_data={"feature_attributions": attributions}
+        )
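
Two behaviors of this wrapper are easy to miss: LIME keys the attributions by discretized rules (e.g. `petal width (cm) <= 0.80`) rather than raw feature names, and although `top_labels` is forwarded to LIME, only the first (top-predicted) label is packaged into the `Explanation`. A self-contained sketch with synthetic data (all names illustrative):

```python
# Illustrative: inspecting rule-keyed LIME attributions on synthetic data.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

from explainiverse.adapters.sklearn_adapter import SklearnAdapter
from explainiverse.explainers.attribution.lime_wrapper import LimeExplainer

X, y = make_classification(n_samples=200, n_features=4, n_informative=3,
                           n_redundant=1, random_state=0)
feature_names = [f"f{i}" for i in range(4)]
class_names = ["neg", "pos"]

adapter = SklearnAdapter(LogisticRegression().fit(X, y),
                         feature_names=feature_names,
                         class_names=class_names)
explainer = LimeExplainer(model=adapter, training_data=X,
                          feature_names=feature_names,
                          class_names=class_names)

explanation = explainer.explain(X[0], num_features=3)
# Keys are discretized rules such as "f2 <= 0.53", not bare feature names.
for rule, weight in explanation.explanation_data["feature_attributions"].items():
    print(f"{weight:+.3f}  {rule}")
```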
explainiverse-0.1.0a1/src/explainiverse/explainers/attribution/shap_wrapper.py
@@ -0,0 +1,66 @@
+# src/explainiverse/explainers/attribution/shap_wrapper.py
+
+import shap
+import numpy as np
+
+from explainiverse.core.explainer import BaseExplainer
+from explainiverse.core.explanation import Explanation
+
+
+class ShapExplainer(BaseExplainer):
+    """
+    SHAP explainer (KernelSHAP-based) for model-agnostic explanations.
+    """
+
+    def __init__(self, model, background_data, feature_names, class_names):
+        """
+        Args:
+            model: A model adapter with a .predict method.
+            background_data: A 2D numpy array used as SHAP background distribution.
+            feature_names: List of feature names.
+            class_names: List of class labels.
+        """
+        super().__init__(model)
+        self.feature_names = feature_names
+        self.class_names = class_names
+        self.explainer = shap.KernelExplainer(model.predict, background_data)
+
+
+    def explain(self, instance, top_labels=1):
+        """
+        Generate SHAP explanation for a single instance.
+
+        Args:
+            instance: 1D numpy array of input features.
+            top_labels: Number of top classes to explain (default: 1)
+
+        Returns:
+            Explanation object
+        """
+        instance = np.array(instance).reshape(1, -1)  # Ensure 2D
+        shap_values = self.explainer.shap_values(instance)
+
+        if isinstance(shap_values, list):
+            # Multi-class: list of arrays, one per class
+            predicted_probs = self.model.predict(instance)[0]
+            top_indices = np.argsort(predicted_probs)[-top_labels:][::-1]
+            label_index = top_indices[0]
+            label_name = self.class_names[label_index]
+            class_shap = shap_values[label_index][0]
+        else:
+            # Single-class (regression or binary classification)
+            label_index = 0
+            label_name = self.class_names[0] if self.class_names else "class_0"
+            class_shap = shap_values[0]
+
+        flat_shap = np.array(class_shap).flatten()
+        attributions = {
+            fname: float(flat_shap[i])
+            for i, fname in enumerate(self.feature_names)
+        }
+
+        return Explanation(
+            explainer_name="SHAP",
+            target_class=label_name,
+            explanation_data={"feature_attributions": attributions}
+        )
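
A usage sketch for `ShapExplainer`, mirroring the LIME example above; the logistic-regression model and the background subsampling are illustrative choices, not from the package. KernelSHAP's cost grows with the size of the background set, so a small sample is customary:

```python
# Hedged usage sketch for ShapExplainer; dataset/model choices are illustrative.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

from explainiverse.adapters.sklearn_adapter import SklearnAdapter
from explainiverse.explainers.attribution.shap_wrapper import ShapExplainer

data = load_iris()
X, y = data.data, data.target
feature_names = list(data.feature_names)
class_names = list(data.target_names)

adapter = SklearnAdapter(
    LogisticRegression(max_iter=1000).fit(X, y),
    feature_names=feature_names,
    class_names=class_names,
)

# Keep the background small: KernelSHAP evaluates the model across the
# background rows for every coalition it samples.
rng = np.random.default_rng(0)
background = X[rng.choice(len(X), size=25, replace=False)]

explainer = ShapExplainer(
    model=adapter,
    background_data=background,
    feature_names=feature_names,
    class_names=class_names,
)
explanation = explainer.explain(X[0])
print(explanation.explanation_data["feature_attributions"])
```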