explainiverse 0.1.1a0__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. explainiverse-0.2.0/PKG-INFO +264 -0
  2. explainiverse-0.2.0/README.md +235 -0
  3. explainiverse-0.2.0/pyproject.toml +64 -0
  4. explainiverse-0.2.0/src/explainiverse/__init__.py +45 -0
  5. explainiverse-0.2.0/src/explainiverse/adapters/__init__.py +9 -0
  6. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/adapters/base_adapter.py +25 -25
  7. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/adapters/sklearn_adapter.py +32 -32
  8. explainiverse-0.2.0/src/explainiverse/core/__init__.py +22 -0
  9. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/core/explainer.py +31 -31
  10. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/core/explanation.py +24 -24
  11. explainiverse-0.2.0/src/explainiverse/core/registry.py +545 -0
  12. explainiverse-0.2.0/src/explainiverse/engine/__init__.py +8 -0
  13. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/engine/suite.py +142 -142
  14. explainiverse-0.2.0/src/explainiverse/evaluation/__init__.py +8 -0
  15. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/evaluation/metrics.py +232 -232
  16. explainiverse-0.2.0/src/explainiverse/explainers/__init__.py +38 -0
  17. explainiverse-0.2.0/src/explainiverse/explainers/attribution/__init__.py +9 -0
  18. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/explainers/attribution/lime_wrapper.py +90 -63
  19. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/src/explainiverse/explainers/attribution/shap_wrapper.py +89 -66
  20. explainiverse-0.2.0/src/explainiverse/explainers/counterfactual/__init__.py +8 -0
  21. explainiverse-0.2.0/src/explainiverse/explainers/counterfactual/dice_wrapper.py +302 -0
  22. explainiverse-0.2.0/src/explainiverse/explainers/global_explainers/__init__.py +23 -0
  23. explainiverse-0.2.0/src/explainiverse/explainers/global_explainers/ale.py +191 -0
  24. explainiverse-0.2.0/src/explainiverse/explainers/global_explainers/partial_dependence.py +192 -0
  25. explainiverse-0.2.0/src/explainiverse/explainers/global_explainers/permutation_importance.py +123 -0
  26. explainiverse-0.2.0/src/explainiverse/explainers/global_explainers/sage.py +164 -0
  27. explainiverse-0.2.0/src/explainiverse/explainers/rule_based/__init__.py +8 -0
  28. explainiverse-0.2.0/src/explainiverse/explainers/rule_based/anchors_wrapper.py +350 -0
  29. explainiverse-0.1.1a0/PKG-INFO +0 -84
  30. explainiverse-0.1.1a0/README.md +0 -61
  31. explainiverse-0.1.1a0/pyproject.toml +0 -27
  32. explainiverse-0.1.1a0/src/explainiverse/__init__.py +0 -1
  33. explainiverse-0.1.1a0/src/explainiverse/adapters/__init__.py +0 -0
  34. explainiverse-0.1.1a0/src/explainiverse/core/__init__.py +0 -0
  35. explainiverse-0.1.1a0/src/explainiverse/engine/__init__.py +0 -0
  36. explainiverse-0.1.1a0/src/explainiverse/evaluation/__init__.py +0 -0
  37. explainiverse-0.1.1a0/src/explainiverse/explainers/__init__.py +0 -0
  38. explainiverse-0.1.1a0/src/explainiverse/explainers/attribution/__init__.py +0 -0
  39. {explainiverse-0.1.1a0 → explainiverse-0.2.0}/LICENSE +0 -0
@@ -0,0 +1,264 @@
Metadata-Version: 2.1
Name: explainiverse
Version: 0.2.0
Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
Home-page: https://github.com/jemsbhai/explainiverse
License: MIT
Keywords: xai,explainability,interpretability,machine-learning,lime,shap,anchors
Author: Muntaser Syed
Author-email: jemsbhai@gmail.com
Requires-Python: >=3.10,<3.13
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
Requires-Dist: numpy (>=1.24,<2.0)
Requires-Dist: scikit-learn (>=1.1,<1.6)
Requires-Dist: scipy (>=1.10,<2.0)
Requires-Dist: shap (>=0.48.0,<0.49.0)
Requires-Dist: xgboost (>=1.7,<3.0)
Project-URL: Repository, https://github.com/jemsbhai/explainiverse
Description-Content-Type: text/markdown

# Explainiverse

**Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.

---

## Features

### 🎯 Comprehensive XAI Coverage

**Local Explainers** (instance-level explanations):
- **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
- **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
- **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
- **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))

**Global Explainers** (model-level explanations):
- **Permutation Importance** - Feature importance via performance degradation ([Breiman, 2001](https://link.springer.com/article/10.1023/A:1010933404324))
- **Partial Dependence (PDP)** - Marginal feature effects ([Friedman, 2001](https://projecteuclid.org/euclid.aos/1013203451))
- **ALE** - Accumulated Local Effects, unbiased for correlated features ([Apley & Zhu, 2020](https://academic.oup.com/jrsssb/article/82/4/1059/7056085))
- **SAGE** - Shapley Additive Global importancE ([Covert et al., 2020](https://arxiv.org/abs/2004.00668))

### 🔌 Extensible Plugin Registry
- Register custom explainers with rich metadata
- Filter by scope (local/global), model type, data type
- Automatic recommendations based on use case

### 📊 Evaluation Metrics
- **AOPC** (Area Over Perturbation Curve)
- **ROAR** (Remove And Retrain)
- Multiple baseline options and curve generation

### 🧪 Standardized Interface
- Consistent `BaseExplainer` API
- Unified `Explanation` output format
- Model adapters for sklearn and more

---

## Installation

From PyPI:

```bash
pip install explainiverse
```

For development:

```bash
git clone https://github.com/jemsbhai/explainiverse.git
cd explainiverse
poetry install
```

---

## Quick Start

### Using the Registry (Recommended)

```python
from explainiverse import default_registry, SklearnAdapter
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris

# Train a model
iris = load_iris()
model = RandomForestClassifier().fit(iris.data, iris.target)
adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())

# List available explainers
print(default_registry.list_explainers())
# ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']

# Create and use an explainer
explainer = default_registry.create(
    "lime",
    model=adapter,
    training_data=iris.data,
    feature_names=iris.feature_names,
    class_names=iris.target_names.tolist()
)
explanation = explainer.explain(iris.data[0])
print(explanation.explanation_data["feature_attributions"])
```

### Filter Explainers by Criteria

```python
# Find local explainers for tabular data
local_tabular = default_registry.filter(scope="local", data_type="tabular")
print(local_tabular)  # ['lime', 'shap', 'anchors', 'counterfactual']

# Find global explainers
global_explainers = default_registry.filter(scope="global")
print(global_explainers)  # ['permutation_importance', 'partial_dependence', 'ale', 'sage']

# Get recommendations
recommendations = default_registry.recommend(
    model_type="any",
    data_type="tabular",
    scope_preference="local"
)
```

### Using Specific Explainers

```python
# Anchors - Rule-based explanations
from explainiverse.explainers import AnchorsExplainer

anchors = AnchorsExplainer(
    model=adapter,
    training_data=X_train,
    feature_names=feature_names,
    class_names=class_names
)
explanation = anchors.explain(instance)
print(explanation.explanation_data["rules"])
# ['petal length (cm) > 2.45', 'petal width (cm) <= 1.75']

# Counterfactual - What-if explanations
from explainiverse.explainers import CounterfactualExplainer

cf = CounterfactualExplainer(
    model=adapter,
    training_data=X_train,
    feature_names=feature_names
)
explanation = cf.explain(instance, num_counterfactuals=3)
print(explanation.explanation_data["changes"])

# SAGE - Global Shapley importance
from explainiverse.explainers import SAGEExplainer

sage = SAGEExplainer(
    model=adapter,
    X=X_train,
    y=y_train,
    feature_names=feature_names
)
explanation = sage.explain()
print(explanation.explanation_data["feature_attributions"])
```

### Explanation Suite (Multi-Explainer Comparison)

```python
from explainiverse import ExplanationSuite

suite = ExplanationSuite(
    model=adapter,
    explainer_configs=[
        ("lime", {"training_data": X_train, "feature_names": feature_names, "class_names": class_names}),
        ("shap", {"background_data": X_train[:50], "feature_names": feature_names, "class_names": class_names}),
    ]
)

results = suite.run(instance)
suite.compare()
```

---

## Registering Custom Explainers

```python
from explainiverse import ExplainerRegistry, ExplainerMeta, BaseExplainer

@default_registry.register_decorator(
    name="my_explainer",
    meta=ExplainerMeta(
        scope="local",
        model_types=["any"],
        data_types=["tabular"],
        description="My custom explainer",
        paper_reference="Author et al., 2024"
    )
)
class MyExplainer(BaseExplainer):
    def explain(self, instance, **kwargs):
        # Your implementation
        return Explanation(...)
```

---

## Running Tests

```bash
# Run all tests
poetry run pytest

# Run with coverage
poetry run pytest --cov=explainiverse

# Run specific test file
poetry run pytest tests/test_new_explainers.py -v
```

---

## Roadmap

- [x] LIME, SHAP (KernelSHAP)
- [x] Anchors, Counterfactuals
- [x] Permutation Importance, PDP, ALE, SAGE
- [x] Explainer Registry with filtering
- [ ] TreeSHAP (optimized for tree models)
- [ ] Integrated Gradients (gradient-based for neural nets)
- [ ] PyTorch/TensorFlow adapters
- [ ] Interactive visualization dashboard

---

## Citation

If you use Explainiverse in your research, please cite:

```bibtex
@software{explainiverse2024,
  title = {Explainiverse: A Unified Framework for Explainable AI},
  author = {Syed, Muntaser},
  year = {2024},
  url = {https://github.com/jemsbhai/explainiverse}
}
```

---

## License

MIT License - see [LICENSE](LICENSE) for details.
@@ -0,0 +1,235 @@
# Explainiverse

**Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.

---

## Features

### 🎯 Comprehensive XAI Coverage

**Local Explainers** (instance-level explanations):
- **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
- **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
- **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
- **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))

**Global Explainers** (model-level explanations):
- **Permutation Importance** - Feature importance via performance degradation ([Breiman, 2001](https://link.springer.com/article/10.1023/A:1010933404324))
- **Partial Dependence (PDP)** - Marginal feature effects ([Friedman, 2001](https://projecteuclid.org/euclid.aos/1013203451))
- **ALE** - Accumulated Local Effects, unbiased for correlated features ([Apley & Zhu, 2020](https://academic.oup.com/jrsssb/article/82/4/1059/7056085))
- **SAGE** - Shapley Additive Global importancE ([Covert et al., 2020](https://arxiv.org/abs/2004.00668))

### 🔌 Extensible Plugin Registry
- Register custom explainers with rich metadata
- Filter by scope (local/global), model type, data type
- Automatic recommendations based on use case

### 📊 Evaluation Metrics
- **AOPC** (Area Over Perturbation Curve)
- **ROAR** (Remove And Retrain)
- Multiple baseline options and curve generation

### 🧪 Standardized Interface
- Consistent `BaseExplainer` API
- Unified `Explanation` output format
- Model adapters for sklearn and more

---

## Installation

From PyPI:

```bash
pip install explainiverse
```

For development:

```bash
git clone https://github.com/jemsbhai/explainiverse.git
cd explainiverse
poetry install
```

---

## Quick Start

### Using the Registry (Recommended)

```python
from explainiverse import default_registry, SklearnAdapter
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris

# Train a model
iris = load_iris()
model = RandomForestClassifier().fit(iris.data, iris.target)
adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())

# List available explainers
print(default_registry.list_explainers())
# ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']

# Create and use an explainer
explainer = default_registry.create(
    "lime",
    model=adapter,
    training_data=iris.data,
    feature_names=iris.feature_names,
    class_names=iris.target_names.tolist()
)
explanation = explainer.explain(iris.data[0])
print(explanation.explanation_data["feature_attributions"])
```

### Filter Explainers by Criteria

```python
# Find local explainers for tabular data
local_tabular = default_registry.filter(scope="local", data_type="tabular")
print(local_tabular)  # ['lime', 'shap', 'anchors', 'counterfactual']

# Find global explainers
global_explainers = default_registry.filter(scope="global")
print(global_explainers)  # ['permutation_importance', 'partial_dependence', 'ale', 'sage']

# Get recommendations
recommendations = default_registry.recommend(
    model_type="any",
    data_type="tabular",
    scope_preference="local"
)
```

### Using Specific Explainers

```python
# Anchors - Rule-based explanations
from explainiverse.explainers import AnchorsExplainer

anchors = AnchorsExplainer(
    model=adapter,
    training_data=X_train,
    feature_names=feature_names,
    class_names=class_names
)
explanation = anchors.explain(instance)
print(explanation.explanation_data["rules"])
# ['petal length (cm) > 2.45', 'petal width (cm) <= 1.75']

# Counterfactual - What-if explanations
from explainiverse.explainers import CounterfactualExplainer

cf = CounterfactualExplainer(
    model=adapter,
    training_data=X_train,
    feature_names=feature_names
)
explanation = cf.explain(instance, num_counterfactuals=3)
print(explanation.explanation_data["changes"])

# SAGE - Global Shapley importance
from explainiverse.explainers import SAGEExplainer

sage = SAGEExplainer(
    model=adapter,
    X=X_train,
    y=y_train,
    feature_names=feature_names
)
explanation = sage.explain()
print(explanation.explanation_data["feature_attributions"])
```

### Explanation Suite (Multi-Explainer Comparison)

```python
from explainiverse import ExplanationSuite

suite = ExplanationSuite(
    model=adapter,
    explainer_configs=[
        ("lime", {"training_data": X_train, "feature_names": feature_names, "class_names": class_names}),
        ("shap", {"background_data": X_train[:50], "feature_names": feature_names, "class_names": class_names}),
    ]
)

results = suite.run(instance)
suite.compare()
```

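### Evaluating Explanations (AOPC Sketch)

The AOPC and ROAR metrics listed under Features measure how much the model's prediction degrades as the features an explainer ranks most important are removed. The snippet below is a minimal, self-contained sketch of the AOPC idea in plain NumPy; it is not the exact API of `explainiverse.evaluation.metrics`. The per-feature `attributions` vector, the mean-value baseline, and the assumption that `adapter.predict` returns class probabilities are illustrative choices for this sketch.

```python
import numpy as np

def aopc_sketch(predict_proba, instance, ranking, baseline):
    """Average drop in the predicted-class probability as the top-ranked
    features are successively replaced by a baseline value (the AOPC idea)."""
    instance = np.asarray(instance, dtype=float)
    baseline = np.asarray(baseline, dtype=float)

    probs = predict_proba(instance.reshape(1, -1))[0]
    target = int(np.argmax(probs))            # class the model originally predicts
    original = probs[target]

    drops = []
    perturbed = instance.copy()
    for idx in ranking:                        # most important feature first
        perturbed[idx] = baseline[idx]         # "remove" one more feature
        score = predict_proba(perturbed.reshape(1, -1))[0][target]
        drops.append(original - score)         # drop relative to the unperturbed input
    return float(np.mean(drops))

# Hypothetical usage: rank features by absolute attribution from a local explainer
# and use training-set means as the removal baseline.
attributions = np.array([0.02, 0.01, 0.45, 0.35])   # per-feature scores (illustrative values)
ranking = np.argsort(-np.abs(attributions))
score = aopc_sketch(adapter.predict, iris.data[0], ranking, baseline=iris.data.mean(axis=0))
```

A higher score indicates that the explainer's top-ranked features really were driving the prediction; ROAR follows the same idea but retrains the model after each removal instead of only perturbing the input.
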
---

## Registering Custom Explainers

```python
from explainiverse import ExplainerRegistry, ExplainerMeta, BaseExplainer

@default_registry.register_decorator(
    name="my_explainer",
    meta=ExplainerMeta(
        scope="local",
        model_types=["any"],
        data_types=["tabular"],
        description="My custom explainer",
        paper_reference="Author et al., 2024"
    )
)
class MyExplainer(BaseExplainer):
    def explain(self, instance, **kwargs):
        # Your implementation
        return Explanation(...)
```

---

## Running Tests

```bash
# Run all tests
poetry run pytest

# Run with coverage
poetry run pytest --cov=explainiverse

# Run specific test file
poetry run pytest tests/test_new_explainers.py -v
```

---

## Roadmap

- [x] LIME, SHAP (KernelSHAP)
- [x] Anchors, Counterfactuals
- [x] Permutation Importance, PDP, ALE, SAGE
- [x] Explainer Registry with filtering
- [ ] TreeSHAP (optimized for tree models)
- [ ] Integrated Gradients (gradient-based for neural nets)
- [ ] PyTorch/TensorFlow adapters
- [ ] Interactive visualization dashboard

---

## Citation

If you use Explainiverse in your research, please cite:

```bibtex
@software{explainiverse2024,
  title = {Explainiverse: A Unified Framework for Explainable AI},
  author = {Syed, Muntaser},
  year = {2024},
  url = {https://github.com/jemsbhai/explainiverse}
}
```

---

## License

MIT License - see [LICENSE](LICENSE) for details.
@@ -0,0 +1,64 @@
[tool.poetry]
name = "explainiverse"
version = "0.2.0"
description = "Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more"
authors = ["Muntaser Syed <jemsbhai@gmail.com>"]
license = "MIT"
readme = "README.md"
repository = "https://github.com/jemsbhai/explainiverse"
homepage = "https://github.com/jemsbhai/explainiverse"
packages = [{ include = "explainiverse", from = "src" }]
keywords = ["xai", "explainability", "interpretability", "machine-learning", "lime", "shap", "anchors"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Science/Research",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]

[tool.poetry.dependencies]
python = ">=3.10,<3.13"
numpy = ">=1.24,<2.0"
lime = "^0.2.0.1"
scikit-learn = ">=1.1,<1.6"
shap = "^0.48.0"
scipy = ">=1.10,<2.0"
xgboost = ">=1.7,<3.0"

[tool.poetry.group.dev.dependencies]
pytest = "^8.0"
pytest-cov = "^4.1"
black = "^24.0"
isort = "^5.13"
mypy = "^1.8"

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_functions = ["test_*"]
addopts = "-v --tb=short"
filterwarnings = [
    "ignore::DeprecationWarning",
    "ignore::FutureWarning",
]

[tool.black]
line-length = 100
target-version = ['py310', 'py311', 'py312']

[tool.isort]
profile = "black"
line_length = 100

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[project.urls]
"Bug Tracker" = "https://github.com/jemsbhai/explainiverse/issues"
"Documentation" = "https://github.com/jemsbhai/explainiverse#readme"
@@ -0,0 +1,45 @@
# src/explainiverse/__init__.py
"""
Explainiverse - A unified, extensible explainability framework.

Supports multiple XAI methods including LIME, SHAP, Anchors, Counterfactuals,
Permutation Importance, PDP, ALE, and SAGE through a consistent interface.

Quick Start:
    from explainiverse import default_registry

    # List available explainers
    print(default_registry.list_explainers())

    # Create an explainer
    explainer = default_registry.create("lime", model=adapter, training_data=X, ...)
    explanation = explainer.explain(instance)
"""

from explainiverse.core.explainer import BaseExplainer
from explainiverse.core.explanation import Explanation
from explainiverse.core.registry import (
    ExplainerRegistry,
    ExplainerMeta,
    default_registry,
    get_default_registry,
)
from explainiverse.adapters.sklearn_adapter import SklearnAdapter
from explainiverse.engine.suite import ExplanationSuite

__version__ = "0.2.0"

__all__ = [
    # Core
    "BaseExplainer",
    "Explanation",
    # Registry
    "ExplainerRegistry",
    "ExplainerMeta",
    "default_registry",
    "get_default_registry",
    # Adapters
    "SklearnAdapter",
    # Engine
    "ExplanationSuite",
]
@@ -0,0 +1,9 @@
# src/explainiverse/adapters/__init__.py
"""
Model adapters - wrappers that provide a consistent interface for different ML frameworks.
"""

from explainiverse.adapters.base_adapter import BaseModelAdapter
from explainiverse.adapters.sklearn_adapter import SklearnAdapter

__all__ = ["BaseModelAdapter", "SklearnAdapter"]
@@ -1,25 +1,25 @@
- # src/explainiverse/adapters/base_adapter.py
-
- from abc import ABC, abstractmethod
-
- class BaseModelAdapter(ABC):
-     """
-     Abstract base class for all model adapters.
-     """
-
-     def __init__(self, model, feature_names=None):
-         self.model = model
-         self.feature_names = feature_names
-
-     @abstractmethod
-     def predict(self, data):
-         """
-         Returns prediction probabilities or outputs in a standard format.
-
-         Args:
-             data: Input data (single instance or batch).
-
-         Returns:
-             List or NumPy array of prediction scores.
-         """
-         pass
+ # src/explainiverse/adapters/base_adapter.py
+
+ from abc import ABC, abstractmethod
+
+ class BaseModelAdapter(ABC):
+     """
+     Abstract base class for all model adapters.
+     """
+
+     def __init__(self, model, feature_names=None):
+         self.model = model
+         self.feature_names = feature_names
+
+     @abstractmethod
+     def predict(self, data):
+         """
+         Returns prediction probabilities or outputs in a standard format.
+
+         Args:
+             data: Input data (single instance or batch).
+
+         Returns:
+             List or NumPy array of prediction scores.
+         """
+         pass
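Since `BaseModelAdapter` is abstract, a concrete adapter only needs to implement `predict`. The following is a hypothetical illustration, not a class shipped with the package: it sketches how one might wrap a bare callable that already returns probability-like outputs so explainers can call `.predict()` on it. The name `CallableAdapter` and the batching behavior are assumptions for this example.

```python
# Hypothetical example - not part of explainiverse itself.
import numpy as np
from explainiverse.adapters.base_adapter import BaseModelAdapter

class CallableAdapter(BaseModelAdapter):
    """Adapts any callable f(X) -> scores to the BaseModelAdapter interface."""

    def __init__(self, fn, feature_names=None):
        super().__init__(model=fn, feature_names=feature_names)

    def predict(self, data):
        # Accept a single instance or a batch; always call the wrapped function on a 2D array.
        batch = np.atleast_2d(np.asarray(data, dtype=float))
        return np.asarray(self.model(batch))
```

An adapter along these lines could then be passed wherever the README examples pass `SklearnAdapter`, since explainers rely only on the `predict` method defined by this interface.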