explainiverse 0.8.4__tar.gz → 0.8.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {explainiverse-0.8.4 → explainiverse-0.8.5}/PKG-INFO +3 -2
- {explainiverse-0.8.4 → explainiverse-0.8.5}/README.md +2 -1
- {explainiverse-0.8.4 → explainiverse-0.8.5}/pyproject.toml +1 -1
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/__init__.py +1 -1
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/__init__.py +4 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/faithfulness_extended.py +217 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/LICENSE +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/adapters/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/adapters/base_adapter.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/adapters/pytorch_adapter.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/adapters/sklearn_adapter.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/core/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/core/explainer.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/core/explanation.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/core/registry.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/engine/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/engine/suite.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/_utils.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/faithfulness.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/metrics.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/stability.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/lime_wrapper.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/shap_wrapper.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/treeshap_wrapper.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/counterfactual/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/counterfactual/dice_wrapper.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/example_based/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/example_based/protodash.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/ale.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/partial_dependence.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/permutation_importance.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/sage.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/deeplift.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/gradcam.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/integrated_gradients.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/lrp.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/saliency.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/smoothgrad.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/tcav.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/rule_based/__init__.py +0 -0
- {explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/rule_based/anchors_wrapper.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: explainiverse
|
|
3
|
-
Version: 0.8.4
|
|
3
|
+
Version: 0.8.5
|
|
4
4
|
Summary: Unified, extensible explainability framework supporting 18 XAI methods including LIME, SHAP, LRP, TCAV, GradCAM, and more
|
|
5
5
|
Home-page: https://github.com/jemsbhai/explainiverse
|
|
6
6
|
License: MIT
|
|
@@ -44,7 +44,7 @@ Description-Content-Type: text/markdown
|
|
|
44
44
|
| Feature | Description |
|
|
45
45
|
|---------|-------------|
|
|
46
46
|
| **18 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++, LRP, TCAV, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
|
|
47
|
-
| **
|
|
47
|
+
| **14 Evaluation Metrics** | Faithfulness (PGI, PGU, Comprehensiveness, Sufficiency, Correlation, Faithfulness Estimate, Monotonicity, Monotonicity-Nguyen, Pixel Flipping, Region Perturbation) and Stability (RIS, ROS, Lipschitz) |
|
|
48
48
|
| **Unified API** | Consistent `BaseExplainer` interface with standardized `Explanation` output |
|
|
49
49
|
| **Plugin Registry** | Filter explainers by scope, model type, data type; automatic recommendations |
|
|
50
50
|
| **Framework Support** | Adapters for scikit-learn and PyTorch (with gradient computation) |
|
|
@@ -100,6 +100,7 @@ Explainiverse includes a comprehensive suite of evaluation metrics based on the
|
|
|
100
100
|
| **Monotonicity** | Sequential feature addition shows monotonic prediction increase | [Arya et al., 2019](https://arxiv.org/abs/1909.03012) |
|
|
101
101
|
| **Monotonicity-Nguyen** | Spearman correlation between attributions and feature removal impact | [Nguyen & Martinez, 2020](https://arxiv.org/abs/2010.07455) |
|
|
102
102
|
| **Pixel Flipping** | AUC of prediction degradation when removing features by importance | [Bach et al., 2015](https://doi.org/10.1371/journal.pone.0130140) |
|
|
103
|
+
| **Region Perturbation** | AUC of prediction degradation when perturbing feature regions by importance | [Samek et al., 2015](https://arxiv.org/abs/1509.06321) |
|
|
103
104
|
|
|
104
105
|
### Stability Metrics
|
|
105
106
|
|
|
@@ -13,7 +13,7 @@
|
|
|
13
13
|
| Feature | Description |
|
|
14
14
|
|---------|-------------|
|
|
15
15
|
| **18 Explainers** | LIME, KernelSHAP, TreeSHAP, Integrated Gradients, DeepLIFT, DeepSHAP, SmoothGrad, Saliency Maps, GradCAM/GradCAM++, LRP, TCAV, Anchors, Counterfactual, Permutation Importance, PDP, ALE, SAGE, ProtoDash |
|
|
16
|
-
| **
|
|
16
|
+
| **14 Evaluation Metrics** | Faithfulness (PGI, PGU, Comprehensiveness, Sufficiency, Correlation, Faithfulness Estimate, Monotonicity, Monotonicity-Nguyen, Pixel Flipping, Region Perturbation) and Stability (RIS, ROS, Lipschitz) |
|
|
17
17
|
| **Unified API** | Consistent `BaseExplainer` interface with standardized `Explanation` output |
|
|
18
18
|
| **Plugin Registry** | Filter explainers by scope, model type, data type; automatic recommendations |
|
|
19
19
|
| **Framework Support** | Adapters for scikit-learn and PyTorch (with gradient computation) |
|
|
@@ -69,6 +69,7 @@ Explainiverse includes a comprehensive suite of evaluation metrics based on the
|
|
|
69
69
|
| **Monotonicity** | Sequential feature addition shows monotonic prediction increase | [Arya et al., 2019](https://arxiv.org/abs/1909.03012) |
|
|
70
70
|
| **Monotonicity-Nguyen** | Spearman correlation between attributions and feature removal impact | [Nguyen & Martinez, 2020](https://arxiv.org/abs/2010.07455) |
|
|
71
71
|
| **Pixel Flipping** | AUC of prediction degradation when removing features by importance | [Bach et al., 2015](https://doi.org/10.1371/journal.pone.0130140) |
|
|
72
|
+
| **Region Perturbation** | AUC of prediction degradation when perturbing feature regions by importance | [Samek et al., 2015](https://arxiv.org/abs/1509.06321) |
|
|
72
73
|
|
|
73
74
|
### Stability Metrics
|
|
74
75
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[tool.poetry]
|
|
2
2
|
name = "explainiverse"
|
|
3
|
-
version = "0.8.4"
|
|
3
|
+
version = "0.8.5"
|
|
4
4
|
description = "Unified, extensible explainability framework supporting 18 XAI methods including LIME, SHAP, LRP, TCAV, GradCAM, and more"
|
|
5
5
|
authors = ["Muntaser Syed <jemsbhai@gmail.com>"]
|
|
6
6
|
license = "MIT"
|
|
@@ -45,6 +45,8 @@ from explainiverse.evaluation.faithfulness_extended import (
|
|
|
45
45
|
compute_batch_monotonicity_nguyen,
|
|
46
46
|
compute_pixel_flipping,
|
|
47
47
|
compute_batch_pixel_flipping,
|
|
48
|
+
compute_region_perturbation,
|
|
49
|
+
compute_batch_region_perturbation,
|
|
48
50
|
)
|
|
49
51
|
|
|
50
52
|
__all__ = [
|
|
@@ -78,4 +80,6 @@ __all__ = [
|
|
|
78
80
|
"compute_batch_monotonicity_nguyen",
|
|
79
81
|
"compute_pixel_flipping",
|
|
80
82
|
"compute_batch_pixel_flipping",
|
|
83
|
+
"compute_region_perturbation",
|
|
84
|
+
"compute_batch_region_perturbation",
|
|
81
85
|
]
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/evaluation/faithfulness_extended.py
RENAMED
|
@@ -256,6 +256,223 @@ def compute_batch_faithfulness_estimate(
|
|
|
256
256
|
}
|
|
257
257
|
|
|
258
258
|
|
|
259
|
+
# =============================================================================
# Metric 5: Region Perturbation (Samek et al., 2015)
# =============================================================================

def compute_region_perturbation(
    model,
    instance: np.ndarray,
    explanation: Explanation,
    baseline: Union[str, float, np.ndarray, Callable] = "mean",
    background_data: np.ndarray = None,
    target_class: int = None,
    region_size: int = None,
    use_absolute: bool = True,
    return_curve: bool = False,
) -> Union[float, Dict[str, Union[float, np.ndarray]]]:
    """
    Compute Region Perturbation score (Samek et al., 2015).

    Similar to Pixel Flipping, but operates on regions (groups) of features
    rather than individual features. Features are divided into non-overlapping
    regions, and regions are perturbed in order of their cumulative importance
    (sum of attributions within the region).

    This metric is particularly relevant for image data where local spatial
    correlations exist, but is also applicable to tabular data with groups
    of related features.

    The score is the Area Under the perturbation Curve (AUC), normalized
    to [0, 1]. Lower AUC indicates better faithfulness (faster degradation
    when important regions are removed first).

    Args:
        model: Model adapter with predict/predict_proba method
        instance: Input instance (1D array)
        explanation: Explanation object with feature_attributions
        baseline: Baseline for feature removal ("mean", "median", scalar, array, callable)
        background_data: Reference data for computing baseline (required for "mean"/"median")
        target_class: Target class index for probability (default: predicted class)
        region_size: Number of features per region. If None, defaults to max(1, n_features // 4)
            For image-like data, this would correspond to patch size.
        use_absolute: If True, sort regions by absolute attribution sum (default: True)
        return_curve: If True, return full degradation curve and details

    Returns:
        If return_curve=False: AUC score (float, 0 to 1, lower is better)
        If return_curve=True: Dictionary with:
            - 'auc': float - Area under the perturbation curve
            - 'curve': np.ndarray - Normalized prediction values at each step
            - 'predictions': np.ndarray - Raw prediction values
            - 'region_order': list - Order in which regions were perturbed
            - 'regions': list - List of feature indices in each region
            - 'n_regions': int - Number of regions
            - 'region_size': int - Size of each region

    References:
        Samek, W., Binder, A., Montavon, G., Lapuschkin, S., & Müller, K. R. (2015).
        Evaluating the Visualization of What a Deep Neural Network has Learned.
        arXiv preprint arXiv:1509.06321.
    """
    instance = np.asarray(instance).flatten()
    n_features = len(instance)

    # Get baseline values; coerce to ndarray so regions can be assigned with
    # fancy indexing below regardless of what the helper returns.
    baseline_values = np.asarray(
        compute_baseline_values(baseline, background_data, n_features)
    )

    # Extract attributions as array
    attr_array = _extract_attribution_array(explanation, n_features)

    # Determine region size
    if region_size is None:
        # Default: divide features into ~4 regions
        region_size = max(1, n_features // 4)
    region_size = max(1, min(region_size, n_features))  # Clamp to valid range

    # Create non-overlapping, contiguous regions
    regions = [
        list(range(start_idx, min(start_idx + region_size, n_features)))
        for start_idx in range(0, n_features, region_size)
    ]
    n_regions = len(regions)

    # Compute region importance (sum of attributions in each region)
    if use_absolute:
        region_importance = [float(np.sum(np.abs(attr_array[r]))) for r in regions]
    else:
        region_importance = [float(np.sum(attr_array[r])) for r in regions]

    # Sort regions by importance (descending - most important first)
    sorted_region_indices = np.argsort(-np.array(region_importance))

    # Single model call on the unperturbed instance serves two purposes:
    # picking the default target class and anchoring the degradation curve.
    # (The original implementation evaluated the model twice on the identical
    # input when target_class was None.)
    original_pred = get_prediction_value(model, instance.reshape(1, -1))

    if target_class is None:
        if isinstance(original_pred, np.ndarray) and original_pred.ndim > 0:
            target_class = int(np.argmax(original_pred))
        else:
            target_class = 0

    # Original prediction value for the target class
    if isinstance(original_pred, np.ndarray) and original_pred.ndim > 0 and len(original_pred) > target_class:
        original_value = original_pred[target_class]
    else:
        original_value = float(original_pred)

    # Start with original instance
    current = instance.copy()

    # Track predictions as regions are perturbed
    predictions = [original_value]

    # Perturb regions one by one (most important first)
    for region_idx in sorted_region_indices:
        region = regions[region_idx]

        # Replace all features in this region with baseline (vectorized)
        current[region] = baseline_values[region]

        # Get prediction for the perturbed instance
        pred = get_prediction_value(model, current.reshape(1, -1))
        if isinstance(pred, np.ndarray) and pred.ndim > 0 and len(pred) > target_class:
            predictions.append(pred[target_class])
        else:
            predictions.append(float(pred))

    predictions = np.array(predictions)

    # Normalize predictions to [0, 1] relative to original
    # curve[i] = prediction after perturbing i regions / original prediction
    if abs(original_value) > 1e-10:
        curve = predictions / original_value
    else:
        # Handle zero original prediction
        curve = predictions

    # Compute AUC using trapezoidal rule
    # x-axis: fraction of regions perturbed (0 to 1)
    # y-axis: relative prediction value
    x = np.linspace(0, 1, len(predictions))
    auc = np.trapz(curve, x)

    if return_curve:
        return {
            "auc": float(auc),
            "curve": curve,
            "predictions": predictions,
            "region_order": sorted_region_indices.tolist(),
            "regions": regions,
            "n_regions": n_regions,
            "region_size": region_size,
        }

    return float(auc)
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
def compute_batch_region_perturbation(
    model,
    X: np.ndarray,
    explanations: List[Explanation],
    baseline: Union[str, float, np.ndarray, Callable] = "mean",
    max_samples: int = None,
    region_size: int = None,
    use_absolute: bool = True,
) -> Dict[str, float]:
    """
    Compute average Region Perturbation score over a batch of instances.

    Instances whose evaluation raises or yields NaN are skipped; summary
    statistics are computed over the remaining valid scores only.

    Args:
        model: Model adapter
        X: Input data (2D array)
        explanations: List of Explanation objects (one per instance)
        baseline: Baseline for feature removal
        max_samples: Maximum number of samples to evaluate
        region_size: Number of features per region (default: n_features // 4)
        use_absolute: If True, sort regions by absolute attribution sum

    Returns:
        Dictionary with mean, std, min, max, and count of valid scores
    """
    limit = len(explanations)
    if max_samples:
        limit = min(limit, max_samples)

    valid_scores = []
    for idx in range(limit):
        # Best-effort: a failure on one instance must not abort the batch.
        try:
            score = compute_region_perturbation(
                model,
                X[idx],
                explanations[idx],
                baseline=baseline,
                background_data=X,
                region_size=region_size,
                use_absolute=use_absolute,
            )
            if not np.isnan(score):
                valid_scores.append(score)
        except Exception:
            continue

    if not valid_scores:
        return {"mean": 0.0, "std": 0.0, "min": 0.0, "max": 0.0, "n_samples": 0}

    score_arr = np.asarray(valid_scores)
    return {
        "mean": float(score_arr.mean()),
        "std": float(score_arr.std()),
        "min": float(score_arr.min()),
        "max": float(score_arr.max()),
        "n_samples": len(valid_scores),
    }
|
|
474
|
+
|
|
475
|
+
|
|
259
476
|
# =============================================================================
|
|
260
477
|
# Metric 4: Pixel Flipping (Bach et al., 2015)
|
|
261
478
|
# =============================================================================
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/__init__.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/lime_wrapper.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/attribution/shap_wrapper.py
RENAMED
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/counterfactual/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/example_based/__init__.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/example_based/protodash.py
RENAMED
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/ale.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/global_explainers/sage.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/__init__.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/deeplift.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/gradcam.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/saliency.py
RENAMED
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/gradient/smoothgrad.py
RENAMED
|
File without changes
|
|
File without changes
|
{explainiverse-0.8.4 → explainiverse-0.8.5}/src/explainiverse/explainers/rule_based/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|