explainiverse 0.1.1a1__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. explainiverse/__init__.py +45 -1
  2. explainiverse/adapters/__init__.py +9 -0
  3. explainiverse/adapters/base_adapter.py +25 -25
  4. explainiverse/adapters/sklearn_adapter.py +32 -32
  5. explainiverse/core/__init__.py +22 -0
  6. explainiverse/core/explainer.py +31 -31
  7. explainiverse/core/explanation.py +24 -24
  8. explainiverse/core/registry.py +545 -0
  9. explainiverse/engine/__init__.py +8 -0
  10. explainiverse/engine/suite.py +142 -142
  11. explainiverse/evaluation/__init__.py +8 -0
  12. explainiverse/evaluation/metrics.py +232 -232
  13. explainiverse/explainers/__init__.py +38 -0
  14. explainiverse/explainers/attribution/__init__.py +9 -0
  15. explainiverse/explainers/attribution/lime_wrapper.py +90 -63
  16. explainiverse/explainers/attribution/shap_wrapper.py +89 -66
  17. explainiverse/explainers/counterfactual/__init__.py +8 -0
  18. explainiverse/explainers/counterfactual/dice_wrapper.py +302 -0
  19. explainiverse/explainers/global_explainers/__init__.py +23 -0
  20. explainiverse/explainers/global_explainers/ale.py +191 -0
  21. explainiverse/explainers/global_explainers/partial_dependence.py +192 -0
  22. explainiverse/explainers/global_explainers/permutation_importance.py +123 -0
  23. explainiverse/explainers/global_explainers/sage.py +164 -0
  24. explainiverse/explainers/rule_based/__init__.py +8 -0
  25. explainiverse/explainers/rule_based/anchors_wrapper.py +350 -0
  26. explainiverse-0.2.0.dist-info/METADATA +264 -0
  27. explainiverse-0.2.0.dist-info/RECORD +29 -0
  28. explainiverse-0.1.1a1.dist-info/METADATA +0 -128
  29. explainiverse-0.1.1a1.dist-info/RECORD +0 -19
  30. {explainiverse-0.1.1a1.dist-info → explainiverse-0.2.0.dist-info}/LICENSE +0 -0
  31. {explainiverse-0.1.1a1.dist-info → explainiverse-0.2.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,350 @@
1
+ # src/explainiverse/explainers/rule_based/anchors_wrapper.py
2
+ """
3
+ Anchors Explainer - High-precision rule-based explanations.
4
+
5
+ Anchors are if-then rules that "anchor" a prediction, meaning the prediction
6
+ remains the same regardless of other features' values (with high probability).
7
+
8
+ Reference:
9
+ Ribeiro, M.T., Singh, S., & Guestrin, C. (2018). Anchors: High-Precision
10
+ Model-Agnostic Explanations. AAAI 2018.
11
+ """
12
+
13
+ import numpy as np
14
+ from typing import List, Optional, Tuple, Dict, Any
15
+ from itertools import combinations
16
+ from explainiverse.core.explainer import BaseExplainer
17
+ from explainiverse.core.explanation import Explanation
18
+
19
+
20
class AnchorsExplainer(BaseExplainer):
    """
    Anchors explainer for rule-based explanations.

    Generates if-then rules that explain individual predictions with
    high precision (the rule holds with high probability). Uses beam search
    to efficiently explore the space of possible anchors, optimizing for
    both precision (rule reliability) and coverage (rule generality).

    The algorithm:
    1. Discretizes continuous features into interpretable bins
    2. Uses beam search to find minimal feature subsets (anchors)
    3. Evaluates precision via perturbation sampling
    4. Returns the valid anchor with the best coverage (shortest on ties),
       falling back to the most precise candidate when no anchor reaches
       the precision threshold

    Attributes:
        model: Model adapter with .predict() method
        training_data: Reference data for generating perturbations
        feature_names: List of feature names
        class_names: List of class names
        threshold: Minimum precision for the anchor (default: 0.95)
        n_samples: Number of samples for precision estimation (default: 1000)
        beam_size: Number of candidates in beam search (default: 4)
    """

    def __init__(
        self,
        model,
        training_data: np.ndarray,
        feature_names: List[str],
        class_names: List[str],
        threshold: float = 0.95,
        n_samples: int = 1000,
        beam_size: int = 4,
        max_anchor_size: Optional[int] = None,
        discretizer: str = "quartile",
        random_state: int = 42
    ):
        """
        Initialize the Anchors explainer.

        Args:
            model: Model adapter with .predict() method
            training_data: Reference data (n_samples, n_features)
            feature_names: List of feature names
            class_names: List of class names
            threshold: Minimum precision for a valid anchor
            n_samples: Number of perturbation samples
            beam_size: Number of candidates to keep in beam search
            max_anchor_size: Maximum number of conditions in anchor
                (defaults to the number of features)
            discretizer: How to discretize continuous features
                ("quartile", "decile"; unknown values fall back to quartile)
            random_state: Random seed
        """
        super().__init__(model)
        self.training_data = np.array(training_data)
        self.feature_names = list(feature_names)
        self.class_names = list(class_names)
        self.threshold = threshold
        self.n_samples = n_samples
        self.beam_size = beam_size
        self.max_anchor_size = max_anchor_size or len(feature_names)
        self.discretizer = discretizer
        self.random_state = random_state
        self.rng = np.random.RandomState(random_state)

        # Pre-compute feature statistics for discretization
        self._compute_discretization()

    def _compute_discretization(self):
        """Pre-compute discretization bins and labels for each feature.

        Also discretizes the full training set once (``self._train_bins``)
        so that perturbation sampling and coverage computation do not need
        to rescan the training data per sample.
        """
        self.bins: Dict[int, np.ndarray] = {}
        self.bin_labels: Dict[int, List[str]] = {}

        if self.discretizer == "decile":
            percentiles = list(range(10, 100, 10))
        else:
            # "quartile" (and any unrecognized value) uses quartile bins.
            percentiles = [25, 50, 75]

        n_features = self.training_data.shape[1]
        for idx in range(n_features):
            values = self.training_data[:, idx]
            # np.unique both sorts and removes duplicate cut points
            # (duplicates arise for near-constant features).
            bins = np.unique(np.percentile(values, percentiles))
            self.bins[idx] = bins

            # Human-readable labels: len(bins) + 1 intervals per feature.
            name = self.feature_names[idx]
            if len(bins) == 0:
                labels = [f"{name} = any"]
            else:
                labels = [f"{name} <= {bins[0]:.2f}"]
                for i in range(len(bins) - 1):
                    labels.append(f"{bins[i]:.2f} < {name} <= {bins[i + 1]:.2f}")
                labels.append(f"{name} > {bins[-1]:.2f}")
            self.bin_labels[idx] = labels

        # Bin index of every training value, computed once up front.
        # np.searchsorted on an empty bins array returns 0, matching
        # _discretize_value's no-bins behavior.
        columns = [
            np.searchsorted(self.bins[idx], self.training_data[:, idx])
            for idx in range(n_features)
        ]
        self._train_bins = (
            np.column_stack(columns)
            if columns
            else np.zeros((len(self.training_data), 0), dtype=int)
        )

    def _discretize_value(self, value: float, feature_idx: int) -> int:
        """Discretize a single value into a bin index."""
        bins = self.bins[feature_idx]
        if len(bins) == 0:
            return 0
        return int(np.searchsorted(bins, value))

    def _discretize_instance(self, instance: np.ndarray) -> np.ndarray:
        """Discretize an entire instance into per-feature bin indices."""
        return np.array([
            self._discretize_value(instance[i], i)
            for i in range(len(instance))
        ])

    def _get_condition_label(self, feature_idx: int, bin_idx: int) -> str:
        """Get human-readable label for a (feature, bin) condition."""
        labels = self.bin_labels[feature_idx]
        if bin_idx < len(labels):
            return labels[bin_idx]
        return f"{self.feature_names[feature_idx]} in bin {bin_idx}"

    def _generate_perturbations(
        self,
        instance: np.ndarray,
        anchor: List[int],
        n_samples: int
    ) -> np.ndarray:
        """
        Generate perturbation samples that respect the anchor conditions.

        Each sample starts from a random training row; every anchored
        feature is then replaced by a training value drawn from the same
        bin as the instance (or pinned to the instance's value when no
        such training value exists).

        Args:
            instance: Original instance
            anchor: List of feature indices that are fixed
            n_samples: Number of samples to generate

        Returns:
            Array of perturbed samples, shape (n_samples, n_features)
        """
        perturbations = np.zeros((n_samples, len(instance)))
        disc_instance = self._discretize_instance(instance)

        # Loop-invariant: the pool of same-bin training values for each
        # anchored feature. None means "pin to the instance's value"
        # (no bins for the feature, or no training value in the bin).
        value_pools: Dict[int, Optional[np.ndarray]] = {}
        for feat_idx in anchor:
            if len(self.bins[feat_idx]) == 0:
                value_pools[feat_idx] = None
            else:
                in_bin = self._train_bins[:, feat_idx] == disc_instance[feat_idx]
                pool = self.training_data[in_bin, feat_idx]
                value_pools[feat_idx] = pool if len(pool) else None

        for i in range(n_samples):
            # Start with a random sample from the training data.
            sample_idx = self.rng.randint(len(self.training_data))
            sample = self.training_data[sample_idx].copy()

            # Force anchored features into the instance's bin.
            for feat_idx in anchor:
                pool = value_pools[feat_idx]
                if pool is None:
                    sample[feat_idx] = instance[feat_idx]
                else:
                    sample[feat_idx] = self.rng.choice(pool)

            perturbations[i] = sample

        return perturbations

    def _compute_precision(
        self,
        instance: np.ndarray,
        anchor: List[int],
        target_class: int
    ) -> Tuple[float, int]:
        """
        Compute the precision of an anchor.

        Precision = P(prediction = target_class | anchor conditions hold),
        estimated over ``self.n_samples`` perturbations.

        Returns:
            Tuple of (precision, match_count) where match_count is the
            number of perturbations predicted as target_class.
        """
        perturbations = self._generate_perturbations(
            instance, anchor, self.n_samples
        )

        predictions = self.model.predict(perturbations)

        # Accept either class probabilities (2-D) or class labels (1-D).
        if predictions.ndim == 2:
            pred_classes = np.argmax(predictions, axis=1)
        else:
            pred_classes = predictions

        matches = np.sum(pred_classes == target_class)
        precision = matches / len(pred_classes)

        return precision, matches

    def _compute_coverage(self, anchor: List[int], instance: np.ndarray) -> float:
        """
        Compute the coverage of an anchor (fraction of training data
        whose discretized features match the anchor conditions).
        """
        if not anchor:
            # Vacuously true conditions match every row.
            return 1.0

        disc_instance = self._discretize_instance(instance)
        cols = np.asarray(anchor)
        hits = np.all(self._train_bins[:, cols] == disc_instance[cols], axis=1)
        return float(np.sum(hits)) / len(self.training_data)

    def _beam_search(
        self,
        instance: np.ndarray,
        target_class: int
    ) -> Tuple[List[int], float, float]:
        """
        Use beam search to find the best anchor.

        Returns:
            Tuple of (anchor_features, precision, coverage). If no anchor
            reaches the precision threshold, the most precise candidate
            found is returned instead.
        """
        n_features = len(instance)

        # Beam entries are (anchor, precision, coverage); start empty.
        candidates: List[Tuple[List[int], float, float]] = [([], 1.0, 1.0)]

        # Best anchor meeting the precision threshold found so far.
        # BUG FIX: this was previously seeded with ([], 0.0, 1.0); since no
        # real anchor can exceed coverage 1.0 (and the tie-break required a
        # length shorter than the empty list), valid anchors were never
        # recorded and early stopping never triggered. Seeding with None
        # makes the first valid anchor win and restores the intended
        # coverage/length preference among subsequent ones.
        best_anchor: Optional[Tuple[List[int], float, float]] = None

        for _ in range(self.max_anchor_size):
            new_candidates = []

            for anchor, _, _ in candidates:
                # Try extending the anchor by each unused feature.
                for feat_idx in range(n_features):
                    if feat_idx in anchor:
                        continue

                    new_anchor = anchor + [feat_idx]
                    precision, _ = self._compute_precision(
                        instance, new_anchor, target_class
                    )
                    coverage = self._compute_coverage(new_anchor, instance)

                    new_candidates.append((new_anchor, precision, coverage))

                    # Record valid anchors: prefer higher coverage, then
                    # shorter rules on coverage ties.
                    if precision >= self.threshold:
                        if best_anchor is None \
                                or coverage > best_anchor[2] \
                                or (coverage == best_anchor[2]
                                    and len(new_anchor) < len(best_anchor[0])):
                            best_anchor = (new_anchor, precision, coverage)

            if not new_candidates:
                break

            # Keep top candidates by precision (prefer smaller anchors for
            # ties, then higher coverage).
            new_candidates.sort(
                key=lambda c: (c[1], -len(c[0]), c[2]),
                reverse=True
            )
            candidates = new_candidates[:self.beam_size]

            # Early stopping: once we hold a valid anchor and no surviving
            # candidate can improve its coverage, stop expanding.
            if best_anchor is not None:
                can_improve = any(
                    prec >= self.threshold and cov > best_anchor[2]
                    for _, prec, cov in candidates
                )
                if not can_improve:
                    break

        if best_anchor is not None:
            return best_anchor
        if candidates:
            # No anchor met the threshold; return the most precise candidate.
            return max(candidates, key=lambda c: c[1])
        return ([], 0.0, 1.0)

    def explain(self, instance: np.ndarray, **kwargs) -> Explanation:
        """
        Generate an anchor explanation for the given instance.

        Args:
            instance: The instance to explain (1D array)

        Returns:
            Explanation object with anchor rules, precision, and coverage
        """
        instance = np.array(instance).flatten()

        # Get the model's prediction for the instance itself; the anchor
        # must preserve this class under perturbation.
        predictions = self.model.predict(instance.reshape(1, -1))
        if predictions.ndim == 2:
            target_class = int(np.argmax(predictions[0]))
        else:
            target_class = int(predictions[0])

        target_name = (
            self.class_names[target_class]
            if target_class < len(self.class_names)
            else f"class_{target_class}"
        )

        # Find anchor using beam search.
        anchor_features, precision, coverage = self._beam_search(instance, target_class)

        # Convert anchored features into human-readable rules.
        disc_instance = self._discretize_instance(instance)
        rules = [
            self._get_condition_label(feat_idx, int(disc_instance[feat_idx]))
            for feat_idx in anchor_features
        ]

        return Explanation(
            explainer_name="Anchors",
            target_class=target_name,
            explanation_data={
                "rules": rules,
                "precision": float(precision),
                "coverage": float(coverage),
                "anchor_features": [self.feature_names[i] for i in anchor_features],
                "anchor_indices": anchor_features,
                # Rank-based pseudo-attributions: earlier anchor conditions
                # get higher weight (1, 1/2, 1/3, ...).
                "feature_attributions": {
                    self.feature_names[i]: 1.0 / (idx + 1)
                    for idx, i in enumerate(anchor_features)
                } if anchor_features else {}
            }
        )
@@ -0,0 +1,264 @@
1
+ Metadata-Version: 2.1
2
+ Name: explainiverse
3
+ Version: 0.2.0
4
+ Summary: Unified, extensible explainability framework supporting LIME, SHAP, Anchors, Counterfactuals, PDP, ALE, SAGE, and more
5
+ Home-page: https://github.com/jemsbhai/explainiverse
6
+ License: MIT
7
+ Keywords: xai,explainability,interpretability,machine-learning,lime,shap,anchors
8
+ Author: Muntaser Syed
9
+ Author-email: jemsbhai@gmail.com
10
+ Requires-Python: >=3.10,<3.13
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Requires-Dist: lime (>=0.2.0.1,<0.3.0.0)
21
+ Requires-Dist: numpy (>=1.24,<2.0)
22
+ Requires-Dist: scikit-learn (>=1.1,<1.6)
23
+ Requires-Dist: scipy (>=1.10,<2.0)
24
+ Requires-Dist: shap (>=0.48.0,<0.49.0)
25
+ Requires-Dist: xgboost (>=1.7,<3.0)
26
+ Project-URL: Repository, https://github.com/jemsbhai/explainiverse
27
+ Description-Content-Type: text/markdown
28
+
29
+ # Explainiverse
30
+
31
+ **Explainiverse** is a unified, extensible Python framework for Explainable AI (XAI).
32
+ It provides a standardized interface for model-agnostic explainability with 8 state-of-the-art XAI methods, evaluation metrics, and a plugin registry for easy extensibility.
33
+
34
+ ---
35
+
36
+ ## Features
37
+
38
+ ### 🎯 Comprehensive XAI Coverage
39
+
40
+ **Local Explainers** (instance-level explanations):
41
+ - **LIME** - Local Interpretable Model-agnostic Explanations ([Ribeiro et al., 2016](https://arxiv.org/abs/1602.04938))
42
+ - **SHAP** - SHapley Additive exPlanations via KernelSHAP ([Lundberg & Lee, 2017](https://arxiv.org/abs/1705.07874))
43
+ - **Anchors** - High-precision rule-based explanations ([Ribeiro et al., 2018](https://ojs.aaai.org/index.php/AAAI/article/view/11491))
44
+ - **Counterfactual** - DiCE-style diverse counterfactual explanations ([Mothilal et al., 2020](https://arxiv.org/abs/1905.07697))
45
+
46
+ **Global Explainers** (model-level explanations):
47
+ - **Permutation Importance** - Feature importance via performance degradation ([Breiman, 2001](https://link.springer.com/article/10.1023/A:1010933404324))
48
+ - **Partial Dependence (PDP)** - Marginal feature effects ([Friedman, 2001](https://projecteuclid.org/euclid.aos/1013203451))
49
+ - **ALE** - Accumulated Local Effects, unbiased for correlated features ([Apley & Zhu, 2020](https://academic.oup.com/jrsssb/article/82/4/1059/7056085))
50
+ - **SAGE** - Shapley Additive Global importancE ([Covert et al., 2020](https://arxiv.org/abs/2004.00668))
51
+
52
+ ### 🔌 Extensible Plugin Registry
53
+ - Register custom explainers with rich metadata
54
+ - Filter by scope (local/global), model type, data type
55
+ - Automatic recommendations based on use case
56
+
57
+ ### 📊 Evaluation Metrics
58
+ - **AOPC** (Area Over Perturbation Curve)
59
+ - **ROAR** (Remove And Retrain)
60
+ - Multiple baseline options and curve generation
61
+
62
+ ### 🧪 Standardized Interface
63
+ - Consistent `BaseExplainer` API
64
+ - Unified `Explanation` output format
65
+ - Model adapters for sklearn and more
66
+
67
+ ---
68
+
69
+ ## Installation
70
+
71
+ From PyPI:
72
+
73
+ ```bash
74
+ pip install explainiverse
75
+ ```
76
+
77
+ For development:
78
+
79
+ ```bash
80
+ git clone https://github.com/jemsbhai/explainiverse.git
81
+ cd explainiverse
82
+ poetry install
83
+ ```
84
+
85
+ ---
86
+
87
+ ## Quick Start
88
+
89
+ ### Using the Registry (Recommended)
90
+
91
+ ```python
92
+ from explainiverse import default_registry, SklearnAdapter
93
+ from sklearn.ensemble import RandomForestClassifier
94
+ from sklearn.datasets import load_iris
95
+
96
+ # Train a model
97
+ iris = load_iris()
98
+ model = RandomForestClassifier().fit(iris.data, iris.target)
99
+ adapter = SklearnAdapter(model, class_names=iris.target_names.tolist())
100
+
101
+ # List available explainers
102
+ print(default_registry.list_explainers())
103
+ # ['lime', 'shap', 'anchors', 'counterfactual', 'permutation_importance', 'partial_dependence', 'ale', 'sage']
104
+
105
+ # Create and use an explainer
106
+ explainer = default_registry.create(
107
+ "lime",
108
+ model=adapter,
109
+ training_data=iris.data,
110
+ feature_names=iris.feature_names,
111
+ class_names=iris.target_names.tolist()
112
+ )
113
+ explanation = explainer.explain(iris.data[0])
114
+ print(explanation.explanation_data["feature_attributions"])
115
+ ```
116
+
117
+ ### Filter Explainers by Criteria
118
+
119
+ ```python
120
+ # Find local explainers for tabular data
121
+ local_tabular = default_registry.filter(scope="local", data_type="tabular")
122
+ print(local_tabular) # ['lime', 'shap', 'anchors', 'counterfactual']
123
+
124
+ # Find global explainers
125
+ global_explainers = default_registry.filter(scope="global")
126
+ print(global_explainers) # ['permutation_importance', 'partial_dependence', 'ale', 'sage']
127
+
128
+ # Get recommendations
129
+ recommendations = default_registry.recommend(
130
+ model_type="any",
131
+ data_type="tabular",
132
+ scope_preference="local"
133
+ )
134
+ ```
135
+
136
+ ### Using Specific Explainers
137
+
138
+ ```python
139
+ # Anchors - Rule-based explanations
140
+ from explainiverse.explainers import AnchorsExplainer
141
+
142
+ anchors = AnchorsExplainer(
143
+ model=adapter,
144
+ training_data=X_train,
145
+ feature_names=feature_names,
146
+ class_names=class_names
147
+ )
148
+ explanation = anchors.explain(instance)
149
+ print(explanation.explanation_data["rules"])
150
+ # ['petal length (cm) > 2.45', 'petal width (cm) <= 1.75']
151
+
152
+ # Counterfactual - What-if explanations
153
+ from explainiverse.explainers import CounterfactualExplainer
154
+
155
+ cf = CounterfactualExplainer(
156
+ model=adapter,
157
+ training_data=X_train,
158
+ feature_names=feature_names
159
+ )
160
+ explanation = cf.explain(instance, num_counterfactuals=3)
161
+ print(explanation.explanation_data["changes"])
162
+
163
+ # SAGE - Global Shapley importance
164
+ from explainiverse.explainers import SAGEExplainer
165
+
166
+ sage = SAGEExplainer(
167
+ model=adapter,
168
+ X=X_train,
169
+ y=y_train,
170
+ feature_names=feature_names
171
+ )
172
+ explanation = sage.explain()
173
+ print(explanation.explanation_data["feature_attributions"])
174
+ ```
175
+
176
+ ### Explanation Suite (Multi-Explainer Comparison)
177
+
178
+ ```python
179
+ from explainiverse import ExplanationSuite
180
+
181
+ suite = ExplanationSuite(
182
+ model=adapter,
183
+ explainer_configs=[
184
+ ("lime", {"training_data": X_train, "feature_names": feature_names, "class_names": class_names}),
185
+ ("shap", {"background_data": X_train[:50], "feature_names": feature_names, "class_names": class_names}),
186
+ ]
187
+ )
188
+
189
+ results = suite.run(instance)
190
+ suite.compare()
191
+ ```
192
+
193
+ ---
194
+
195
+ ## Registering Custom Explainers
196
+
197
+ ```python
198
+ from explainiverse import ExplainerRegistry, ExplainerMeta, BaseExplainer
199
+
200
+ @default_registry.register_decorator(
201
+ name="my_explainer",
202
+ meta=ExplainerMeta(
203
+ scope="local",
204
+ model_types=["any"],
205
+ data_types=["tabular"],
206
+ description="My custom explainer",
207
+ paper_reference="Author et al., 2024"
208
+ )
209
+ )
210
+ class MyExplainer(BaseExplainer):
211
+ def explain(self, instance, **kwargs):
212
+ # Your implementation
213
+ return Explanation(...)
214
+ ```
215
+
216
+ ---
217
+
218
+ ## Running Tests
219
+
220
+ ```bash
221
+ # Run all tests
222
+ poetry run pytest
223
+
224
+ # Run with coverage
225
+ poetry run pytest --cov=explainiverse
226
+
227
+ # Run specific test file
228
+ poetry run pytest tests/test_new_explainers.py -v
229
+ ```
230
+
231
+ ---
232
+
233
+ ## Roadmap
234
+
235
+ - [x] LIME, SHAP (KernelSHAP)
236
+ - [x] Anchors, Counterfactuals
237
+ - [x] Permutation Importance, PDP, ALE, SAGE
238
+ - [x] Explainer Registry with filtering
239
+ - [ ] TreeSHAP (optimized for tree models)
240
+ - [ ] Integrated Gradients (gradient-based for neural nets)
241
+ - [ ] PyTorch/TensorFlow adapters
242
+ - [ ] Interactive visualization dashboard
243
+
244
+ ---
245
+
246
+ ## Citation
247
+
248
+ If you use Explainiverse in your research, please cite:
249
+
250
+ ```bibtex
251
+ @software{explainiverse2024,
252
+ title = {Explainiverse: A Unified Framework for Explainable AI},
253
+ author = {Syed, Muntaser},
254
+ year = {2024},
255
+ url = {https://github.com/jemsbhai/explainiverse}
256
+ }
257
+ ```
258
+
259
+ ---
260
+
261
+ ## License
262
+
263
+ MIT License - see [LICENSE](LICENSE) for details.
264
+
@@ -0,0 +1,29 @@
1
+ explainiverse/__init__.py,sha256=G8RreMDjukC0quXjsutBmEP_dtTdj_Q9CClA61xen0o,1207
2
+ explainiverse/adapters/__init__.py,sha256=fNlWQ0VDjNqi4G4lwaJRTtL0wGVgvEE-4pZt6vOOjYU,322
3
+ explainiverse/adapters/base_adapter.py,sha256=Nqt0GeDn_-PjTyJcZsE8dRTulavqFQsv8sMYWS_ps-M,603
4
+ explainiverse/adapters/sklearn_adapter.py,sha256=pzIBtMuqrG-6ZbUqUCMt7rSk3Ow0FgrY268FSweFvw4,958
5
+ explainiverse/core/__init__.py,sha256=P3jHMnH5coFqTTO1w-gT-rurkCM1-9r3pF-055pbXMg,474
6
+ explainiverse/core/explainer.py,sha256=Z9on-9VblYDlQx9oBm1BHpmAf_NsQajZ3qr-u48Aejo,784
7
+ explainiverse/core/explanation.py,sha256=6zxFh_TH8tFHc-r_H5-WHQ05Sp1Kp2TxLz3gyFek5jo,881
8
+ explainiverse/core/registry.py,sha256=neI--cDC6j2VyZdUQbKkt1ERLGsaJdSyZSMvJ1b9RYs,19061
9
+ explainiverse/engine/__init__.py,sha256=1sZO8nH1mmwK2e-KUavBQm7zYDWUe27nyWoFy9tgsiA,197
10
+ explainiverse/engine/suite.py,sha256=sq8SK_6Pf0qRckTmVJ7Mdosu9bhkjAGPGN8ymLGFP9E,4914
11
+ explainiverse/evaluation/__init__.py,sha256=Y50L_b4HKthg4epwcayPHXh0l4i4MUuzvaNlqPmUNZY,212
12
+ explainiverse/evaluation/metrics.py,sha256=tSBXtyA_-0zOGCGjlPZU6LdGKRH_QpWfgKa78sdlovs,7453
13
+ explainiverse/explainers/__init__.py,sha256=CYaDGsASoiNkwUUkeugLowR1-kupLNSvaCK8Fw_zdRI,1564
14
+ explainiverse/explainers/attribution/__init__.py,sha256=ei8w6_4VL5aA5HSwIhcJx6gD_oVNAYFf_H1PRAi1SCA,326
15
+ explainiverse/explainers/attribution/lime_wrapper.py,sha256=OnXIV7t6yd-vt38sIi7XmHFbgzlZfCEbRlFyGGd5XiE,3245
16
+ explainiverse/explainers/attribution/shap_wrapper.py,sha256=tKie5AvN7mb55PWOYdMvW0lUAYjfHPzYosEloEY2ZzI,3210
17
+ explainiverse/explainers/counterfactual/__init__.py,sha256=gEV6P8h2fZ3-pv5rqp5sNDqrLErh5ntqpxIIBVCMFv4,247
18
+ explainiverse/explainers/counterfactual/dice_wrapper.py,sha256=PyJYF-z1nyyy0mFROnkJqPtcuT2PwEBARwfh37mZ5ew,11373
19
+ explainiverse/explainers/global_explainers/__init__.py,sha256=91xayho0r-fVeIxBLTxF-aBaBhRTRRXxGZ7oUHh7z64,713
20
+ explainiverse/explainers/global_explainers/ale.py,sha256=tgG3XTppCf8LiD7uKzBt4DI8C589EHsTmzeydHh79OQ,6287
21
+ explainiverse/explainers/global_explainers/partial_dependence.py,sha256=dH6yMjpwZads3pACR3rSykTbssLGHH7e6HfMlpl-S3I,6745
22
+ explainiverse/explainers/global_explainers/permutation_importance.py,sha256=bcgKz1S_D3lrBMgpqEF_Z6qw8Knxl_cfR50hrSO2tBc,4410
23
+ explainiverse/explainers/global_explainers/sage.py,sha256=57Xw1SK529x5JXWt0TVrcFYUUP3C65LfUwgoM-Z3gaw,5839
24
+ explainiverse/explainers/rule_based/__init__.py,sha256=gKzlFCAzwurAMLJcuYgal4XhDj1thteBGcaHWmN7iWk,243
25
+ explainiverse/explainers/rule_based/anchors_wrapper.py,sha256=ML7W6aam-eMGZHy5ilol8qupZvNBJpYAFatEEPnuMyo,13254
26
+ explainiverse-0.2.0.dist-info/LICENSE,sha256=28rbHe8rJgmUlRdxJACfq1Sj-MtCEhyHxkJedQd1ZYA,1070
27
+ explainiverse-0.2.0.dist-info/METADATA,sha256=WqgN7AquEhUxeO6G3ZLhOXjKuNoVcOuVfXU8jQ1u1F0,7731
28
+ explainiverse-0.2.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
29
+ explainiverse-0.2.0.dist-info/RECORD,,