explainiverse 0.8.1__py3-none-any.whl → 0.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
explainiverse/__init__.py CHANGED
@@ -34,7 +34,7 @@ from explainiverse.adapters.sklearn_adapter import SklearnAdapter
  from explainiverse.adapters import TORCH_AVAILABLE
  from explainiverse.engine.suite import ExplanationSuite

- __version__ = "0.8.1"
+ __version__ = "0.8.2"

  __all__ = [
      # Core
explainiverse/evaluation/__init__.py CHANGED
@@ -3,9 +3,10 @@
  Evaluation metrics for explanation quality.

  Includes:
- - Faithfulness metrics (PGI, PGU, Comprehensiveness, Sufficiency)
+ - Faithfulness metrics (PGI, PGU, Comprehensiveness, Sufficiency, Faithfulness Estimate)
  - Stability metrics (RIS, ROS, Lipschitz)
  - Perturbation metrics (AOPC, ROAR)
+ - Extended faithfulness metrics (Phase 1 expansion)
  """

  from explainiverse.evaluation.metrics import (
@@ -35,13 +36,20 @@ from explainiverse.evaluation.stability import (
      compare_explainer_stability,
  )

+ from explainiverse.evaluation.faithfulness_extended import (
+     compute_faithfulness_estimate,
+     compute_batch_faithfulness_estimate,
+     compute_monotonicity,
+     compute_batch_monotonicity,
+ )
+
  __all__ = [
      # Perturbation metrics (existing)
      "compute_aopc",
      "compute_batch_aopc",
      "compute_roar",
      "compute_roar_curve",
-     # Faithfulness metrics (new)
+     # Faithfulness metrics (core)
      "compute_pgi",
      "compute_pgu",
      "compute_faithfulness_score",
@@ -50,11 +58,16 @@ __all__ = [
      "compute_faithfulness_correlation",
      "compare_explainer_faithfulness",
      "compute_batch_faithfulness",
-     # Stability metrics (new)
+     # Stability metrics
      "compute_ris",
      "compute_ros",
      "compute_lipschitz_estimate",
      "compute_stability_metrics",
      "compute_batch_stability",
      "compare_explainer_stability",
+     # Extended faithfulness metrics (Phase 1)
+     "compute_faithfulness_estimate",
+     "compute_batch_faithfulness_estimate",
+     "compute_monotonicity",
+     "compute_batch_monotonicity",
  ]
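
With the updated exports above, the four new metric functions become importable directly from the explainiverse.evaluation package. A minimal import sketch, assuming the 0.8.2 wheel is installed:

from explainiverse.evaluation import (
    compute_faithfulness_estimate,
    compute_batch_faithfulness_estimate,
    compute_monotonicity,
    compute_batch_monotonicity,
)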
explainiverse/evaluation/faithfulness_extended.py ADDED
@@ -0,0 +1,416 @@
+ # src/explainiverse/evaluation/faithfulness_extended.py
+ """
+ Extended faithfulness evaluation metrics.
+
+ Phase 1 metrics for exceeding OpenXAI/Quantus:
+ - Faithfulness Estimate (Alvarez-Melis et al., 2018)
+ - Monotonicity (Arya et al., 2019)
+ - Monotonicity-Nguyen (Nguyen et al., 2020)
+ - Pixel Flipping (Bach et al., 2015)
+ - Region Perturbation (Samek et al., 2015)
+ - Selectivity (Montavon et al., 2018)
+ - Sensitivity-n (Ancona et al., 2018)
+ - IROF (Rieger & Hansen, 2020)
+ - Infidelity (Yeh et al., 2019)
+ - ROAD (Rong et al., 2022)
+ - Insertion AUC (Petsiuk et al., 2018)
+ - Deletion AUC (Petsiuk et al., 2018)
+ """
+ import numpy as np
+ import re
+ from typing import Union, Callable, List, Dict, Optional, Tuple
+ from scipy import stats
+
+ from explainiverse.core.explanation import Explanation
+ from explainiverse.evaluation._utils import (
+     get_sorted_feature_indices,
+     compute_baseline_values,
+     apply_feature_mask,
+     resolve_k,
+     get_prediction_value,
+     compute_prediction_change,
+ )
+
+
+ def _extract_attribution_array(
+     explanation: Explanation,
+     n_features: int
+ ) -> np.ndarray:
+     """
+     Extract attribution values as a numpy array in feature index order.
+
+     Args:
+         explanation: Explanation object with feature_attributions
+         n_features: Expected number of features
+
+     Returns:
+         1D numpy array of attribution values ordered by feature index
+     """
+     attributions = explanation.explanation_data.get("feature_attributions", {})
+     feature_names = getattr(explanation, 'feature_names', None)
+
+     if not attributions:
+         raise ValueError("No feature attributions found in explanation.")
+
+     # Build attribution array in feature order
+     attr_array = np.zeros(n_features)
+
+     if feature_names is not None:
+         for fname, value in attributions.items():
+             # Try to find the index for this feature name
+             for i, fn in enumerate(feature_names):
+                 if fn == fname or fn in fname or fname in fn:
+                     attr_array[i] = value
+                     break
+             else:
+                 # Try extracting index from name pattern
+                 for pattern in [r'feature[_\s]*(\d+)', r'feat[_\s]*(\d+)', r'^f(\d+)', r'^x(\d+)']:
+                     match = re.search(pattern, fname, re.IGNORECASE)
+                     if match:
+                         idx = int(match.group(1))
+                         if 0 <= idx < n_features:
+                             attr_array[idx] = value
+                         break
+     else:
+         # No feature names - try to extract indices from keys
+         for fname, value in attributions.items():
+             for pattern in [r'feature[_\s]*(\d+)', r'feat[_\s]*(\d+)', r'^f(\d+)', r'^x(\d+)']:
+                 match = re.search(pattern, fname, re.IGNORECASE)
+                 if match:
+                     idx = int(match.group(1))
+                     if 0 <= idx < n_features:
+                         attr_array[idx] = value
+                     break
+
+     return attr_array
+
+
+ # =============================================================================
+ # Metric 1: Faithfulness Estimate (Alvarez-Melis & Jaakkola, 2018)
+ # =============================================================================
+
+ def compute_faithfulness_estimate(
+     model,
+     instance: np.ndarray,
+     explanation: Explanation,
+     baseline: Union[str, float, np.ndarray, Callable] = "mean",
+     background_data: np.ndarray = None,
+     subset_size: int = None,
+     n_subsets: int = 100,
+     seed: int = None,
+ ) -> float:
+     """
+     Compute Faithfulness Estimate (Alvarez-Melis & Jaakkola, 2018).
+
+     Measures the correlation between feature attributions and the actual
+     impact on predictions when individual features are perturbed. For each
+     feature, computes the prediction change when that feature is replaced
+     with baseline, then correlates these changes with attribution magnitudes.
+
+     Higher correlation indicates the explanation correctly identifies
+     which features actually matter for the prediction.
+
+     Args:
+         model: Model adapter with predict/predict_proba method
+         instance: Input instance (1D array)
+         explanation: Explanation object with feature_attributions
+         baseline: Baseline for feature replacement ("mean", "median", scalar, array, callable)
+         background_data: Reference data for computing baseline (required for "mean"/"median")
+         subset_size: Size of random subsets to perturb (default: 1 for single-feature)
+         n_subsets: Number of random subsets to evaluate (used when subset_size > 1)
+         seed: Random seed for reproducibility
+
+     Returns:
+         Faithfulness estimate score (Pearson correlation, -1 to 1, higher is better)
+
+     References:
+         Alvarez-Melis, D., & Jaakkola, T. S. (2018). Towards Robust Interpretability
+         with Self-Explaining Neural Networks. NeurIPS.
+     """
+     if seed is not None:
+         np.random.seed(seed)
+
+     instance = np.asarray(instance).flatten()
+     n_features = len(instance)
+
+     # Get baseline values
+     baseline_values = compute_baseline_values(
+         baseline, background_data, n_features
+     )
+
+     # Extract attributions as array
+     attr_array = _extract_attribution_array(explanation, n_features)
+
+     # Default subset_size is 1 (single-feature perturbation)
+     if subset_size is None:
+         subset_size = 1
+
+     if subset_size == 1:
+         # Single-feature perturbation: evaluate each feature individually
+         prediction_changes = []
+         attribution_values = []
+
+         for i in range(n_features):
+             # Skip features with zero attribution (they won't affect correlation)
+             if abs(attr_array[i]) < 1e-10:
+                 continue
+
+             # Perturb single feature
+             perturbed = apply_feature_mask(instance, [i], baseline_values)
+
+             # Compute prediction change
+             change = compute_prediction_change(model, instance, perturbed, metric="absolute")
+
+             prediction_changes.append(change)
+             attribution_values.append(abs(attr_array[i]))
+
+         if len(prediction_changes) < 2:
+             return 0.0  # Not enough data points for correlation
+
+         # Compute Pearson correlation
+         corr, _ = stats.pearsonr(attribution_values, prediction_changes)
+
+         return float(corr) if not np.isnan(corr) else 0.0
+
+     else:
+         # Random subset perturbation
+         prediction_changes = []
+         attribution_sums = []
+
+         for _ in range(n_subsets):
+             # Sample random subset of features
+             subset_indices = np.random.choice(
+                 n_features, size=min(subset_size, n_features), replace=False
+             )
+
+             # Perturb subset
+             perturbed = apply_feature_mask(instance, subset_indices.tolist(), baseline_values)
+
+             # Compute prediction change
+             change = compute_prediction_change(model, instance, perturbed, metric="absolute")
+
+             # Sum of attributions in subset
+             attr_sum = np.sum(np.abs(attr_array[subset_indices]))
+
+             prediction_changes.append(change)
+             attribution_sums.append(attr_sum)
+
+         if len(prediction_changes) < 2:
+             return 0.0
+
+         # Compute Pearson correlation
+         corr, _ = stats.pearsonr(attribution_sums, prediction_changes)
+
+         return float(corr) if not np.isnan(corr) else 0.0
+
+
+ def compute_batch_faithfulness_estimate(
+     model,
+     X: np.ndarray,
+     explanations: List[Explanation],
+     baseline: Union[str, float, np.ndarray, Callable] = "mean",
+     max_samples: int = None,
+     seed: int = None,
+ ) -> Dict[str, float]:
+     """
+     Compute average Faithfulness Estimate over a batch of instances.
+
+     Args:
+         model: Model adapter
+         X: Input data (2D array)
+         explanations: List of Explanation objects (one per instance)
+         baseline: Baseline for feature replacement
+         max_samples: Maximum number of samples to evaluate
+         seed: Random seed
+
+     Returns:
+         Dictionary with mean, std, min, max, and count of valid scores
+     """
+     n_samples = len(explanations)
+     if max_samples:
+         n_samples = min(n_samples, max_samples)
+
+     scores = []
+
+     for i in range(n_samples):
+         try:
+             score = compute_faithfulness_estimate(
+                 model, X[i], explanations[i],
+                 baseline=baseline, background_data=X,
+                 seed=seed
+             )
+             if not np.isnan(score):
+                 scores.append(score)
+         except Exception:
+             continue
+
+     if not scores:
+         return {"mean": 0.0, "std": 0.0, "min": 0.0, "max": 0.0, "n_samples": 0}
+
+     return {
+         "mean": float(np.mean(scores)),
+         "std": float(np.std(scores)),
+         "min": float(np.min(scores)),
+         "max": float(np.max(scores)),
+         "n_samples": len(scores),
+     }
+
+
+ # =============================================================================
+ # Metric 2: Monotonicity (Arya et al., 2019)
+ # =============================================================================
+
+ def compute_monotonicity(
+     model,
+     instance: np.ndarray,
+     explanation: Explanation,
+     baseline: Union[str, float, np.ndarray, Callable] = "mean",
+     background_data: np.ndarray = None,
+     target_class: int = None,
+     use_absolute: bool = True,
+     tolerance: float = 1e-6,
+ ) -> float:
+     """
+     Compute Monotonicity (Arya et al., 2019).
+
+     Measures whether sequentially adding features in order of their attributed
+     importance monotonically increases the model's prediction confidence.
+     Starting from a baseline (all features masked), features are revealed
+     one-by-one in descending order of attribution. A faithful explanation
+     should show monotonically increasing predictions.
+
+     Args:
+         model: Model adapter with predict/predict_proba method
+         instance: Input instance (1D array)
+         explanation: Explanation object with feature_attributions
+         baseline: Baseline for masked features ("mean", "median", scalar, array, callable)
+         background_data: Reference data for computing baseline (required for "mean"/"median")
+         target_class: Target class index for probability (default: predicted class)
+         use_absolute: If True, sort features by absolute attribution value
+         tolerance: Small value for numerical stability in monotonicity check
+
+     Returns:
+         Monotonicity score (0 to 1, higher is better)
+         1.0 means perfectly monotonic increase
+
+     References:
+         Arya, V., et al. (2019). One Explanation Does Not Fit All: A Toolkit and
+         Taxonomy of AI Explainability Techniques. arXiv:1909.03012.
+     """
+     instance = np.asarray(instance).flatten()
+     n_features = len(instance)
+
+     # Get baseline values
+     baseline_values = compute_baseline_values(
+         baseline, background_data, n_features
+     )
+
+     # Extract attributions as array
+     attr_array = _extract_attribution_array(explanation, n_features)
+
+     # Sort features by attribution (descending - most important first)
+     if use_absolute:
+         sorted_indices = np.argsort(-np.abs(attr_array))
+     else:
+         sorted_indices = np.argsort(-attr_array)
+
+     # Determine target class
+     if target_class is None:
+         # Use predicted class
+         pred = get_prediction_value(model, instance.reshape(1, -1))
+         if isinstance(pred, np.ndarray) and pred.ndim > 0:
+             target_class = int(np.argmax(pred))
+         else:
+             target_class = 0
+
+     # Start from baseline (all features masked)
+     current = baseline_values.copy()
+
+     # Track predictions as features are revealed
+     predictions = []
+
+     # Get initial prediction (baseline state)
+     pred = get_prediction_value(model, current.reshape(1, -1))
+     if isinstance(pred, np.ndarray) and pred.ndim > 0 and len(pred) > target_class:
+         predictions.append(pred[target_class])
+     else:
+         predictions.append(float(pred))
+
+     # Add features one by one
+     revealed_features = []
+     for idx in sorted_indices:
+         # Reveal this feature (set to original value)
+         revealed_features.append(idx)
+         current[idx] = instance[idx]
+
+         # Get prediction
+         pred = get_prediction_value(model, current.reshape(1, -1))
+         if isinstance(pred, np.ndarray) and pred.ndim > 0 and len(pred) > target_class:
+             predictions.append(pred[target_class])
+         else:
+             predictions.append(float(pred))
+
+     # Count monotonic increases
+     # A step is monotonic if: pred[i+1] >= pred[i] - tolerance
+     n_steps = len(predictions) - 1
+     if n_steps == 0:
+         return 1.0
+
+     monotonic_steps = 0
+     for i in range(n_steps):
+         if predictions[i + 1] >= predictions[i] - tolerance:
+             monotonic_steps += 1
+
+     return float(monotonic_steps) / float(n_steps)
+
+
+ def compute_batch_monotonicity(
+     model,
+     X: np.ndarray,
+     explanations: List[Explanation],
+     baseline: Union[str, float, np.ndarray, Callable] = "mean",
+     max_samples: int = None,
+     use_absolute: bool = True,
+ ) -> Dict[str, float]:
+     """
+     Compute average Monotonicity over a batch of instances.
+
+     Args:
+         model: Model adapter
+         X: Input data (2D array)
+         explanations: List of Explanation objects (one per instance)
+         baseline: Baseline for masked features
+         max_samples: Maximum number of samples to evaluate
+         use_absolute: If True, sort features by absolute attribution value
+
+     Returns:
+         Dictionary with mean, std, min, max, and count of valid scores
+     """
+     n_samples = len(explanations)
+     if max_samples:
+         n_samples = min(n_samples, max_samples)
+
+     scores = []
+
+     for i in range(n_samples):
+         try:
+             score = compute_monotonicity(
+                 model, X[i], explanations[i],
+                 baseline=baseline, background_data=X,
+                 use_absolute=use_absolute
+             )
+             if not np.isnan(score):
+                 scores.append(score)
+         except Exception:
+             continue
+
+     if not scores:
+         return {"mean": 0.0, "std": 0.0, "min": 0.0, "max": 0.0, "n_samples": 0}
+
+     return {
+         "mean": float(np.mean(scores)),
+         "std": float(np.std(scores)),
+         "min": float(np.min(scores)),
+         "max": float(np.max(scores)),
+         "n_samples": len(scores),
+     }
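
The two metrics added in this file follow a perturb-and-measure pattern: Faithfulness Estimate correlates per-feature attribution magnitudes with the prediction change caused by replacing each feature with a baseline value, and Monotonicity reveals features in descending attribution order, starting from the baseline vector, and counts how often the target-class probability does not decrease. The sketch below reproduces that logic standalone with numpy, scipy, and scikit-learn rather than through the module's own helpers (compute_baseline_values, apply_feature_mask, and get_prediction_value are not shown in this diff); the toy model, data, and coefficient-based attributions are illustrative assumptions, not part of the package.

import numpy as np
from scipy import stats
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

# Toy setup: a linear model whose |coefficient * value| products stand in
# for an explainer's per-feature attributions.
X, y = make_classification(n_samples=500, n_features=6, random_state=0)
model = LogisticRegression(max_iter=1000).fit(X, y)
x = X[0]
target = int(model.predict(x.reshape(1, -1))[0])
baseline = X.mean(axis=0)          # analogue of the module's "mean" baseline
attributions = np.abs(model.coef_[0] * x)

def proba(v):
    return model.predict_proba(v.reshape(1, -1))[0, target]

# Faithfulness Estimate: Pearson correlation between attribution magnitude and
# |prediction change| when each feature is individually set to its baseline.
# (The library version additionally skips near-zero attributions.)
changes = []
for i in range(len(x)):
    perturbed = x.copy()
    perturbed[i] = baseline[i]
    changes.append(abs(proba(x) - proba(perturbed)))
faithfulness_estimate, _ = stats.pearsonr(attributions, changes)

# Monotonicity: starting from the baseline vector, reveal features in
# descending attribution order and count non-decreasing probability steps.
order = np.argsort(-attributions)
current = baseline.copy()
preds = [proba(current)]
for i in order:
    current[i] = x[i]
    preds.append(proba(current))
monotonic_steps = [preds[j + 1] >= preds[j] - 1e-6 for j in range(len(preds) - 1)]
monotonicity = float(np.mean(monotonic_steps))

print(f"faithfulness_estimate={faithfulness_estimate:.3f} monotonicity={monotonicity:.3f}")

Because the attributions here are taken straight from the model's own coefficients, both scores should typically come out high on this toy example, though neither is guaranteed to reach 1.0.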
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: explainiverse
- Version: 0.8.1
+ Version: 0.8.2
  Summary: Unified, extensible explainability framework supporting 18 XAI methods including LIME, SHAP, LRP, TCAV, GradCAM, and more
  Home-page: https://github.com/jemsbhai/explainiverse
  License: MIT
@@ -1,4 +1,4 @@
- explainiverse/__init__.py,sha256=fnB58cF7z1PvBjNkKvEcmXI0u2l3T3FYLyCx1sLEmXQ,1694
+ explainiverse/__init__.py,sha256=icvNmaSq0DAERqIrU60N60KCIspHbtEWTi3kt_YXTUI,1694
  explainiverse/adapters/__init__.py,sha256=HcQGISyp-YQ4jEj2IYveX_c9X5otLcTNWRnVRRhzRik,781
  explainiverse/adapters/base_adapter.py,sha256=Nqt0GeDn_-PjTyJcZsE8dRTulavqFQsv8sMYWS_ps-M,603
  explainiverse/adapters/pytorch_adapter.py,sha256=DLQKJ7gB0foPwAmcrru7QdZnPRnhqDKpFCT-EaD3420,15612
@@ -9,9 +9,10 @@ explainiverse/core/explanation.py,sha256=498BbRYrNR-BOql78sENOsyWxgqLsBVZXn14lh-
  explainiverse/core/registry.py,sha256=6HttL27Ty4jYtugRf-EDIKPy80M8BfvUppAKwwGDyQ8,27207
  explainiverse/engine/__init__.py,sha256=1sZO8nH1mmwK2e-KUavBQm7zYDWUe27nyWoFy9tgsiA,197
  explainiverse/engine/suite.py,sha256=G-7OjESisSTaQ1FQrlPl4YydX13uz8Bb70hJZNlcl2M,8918
- explainiverse/evaluation/__init__.py,sha256=ePE97KwSjg_IChZ03DeQax8GruTjx-BVrMSi_nzoyoA,1501
+ explainiverse/evaluation/__init__.py,sha256=XFVnmwrRtHHhtxI_yOw_nsR67pJvH-IBO_lEUVI-eDE,1957
  explainiverse/evaluation/_utils.py,sha256=ej7YOPZ90gVHuuIMj45EXHq9Jx3QG7lhaj5sk26hRpg,10519
  explainiverse/evaluation/faithfulness.py,sha256=_40afOW6vJ3dQguHlJySlgWqiJF_xIvN-uVA3nPKRvI,14841
+ explainiverse/evaluation/faithfulness_extended.py,sha256=0zHcmINNA88EJcKOY04Z384S3QhBMo7W2m3lGNkUiNQ,14690
  explainiverse/evaluation/metrics.py,sha256=snNK9Ua1VzHDT6DlrhYL4m2MmRF3X15vuuVXiHbeicU,9944
  explainiverse/evaluation/stability.py,sha256=q2d3rpxpp0X1s6ADST1iZA4tzksLJpR0mYBnA_U5FIs,12090
  explainiverse/explainers/__init__.py,sha256=-ncRXbFKahH3bR0oXM2UQM4LtTdTlvdeprL6cHeqNBs,2549
@@ -38,7 +39,7 @@ explainiverse/explainers/gradient/smoothgrad.py,sha256=COIKZSFcApmMkA62M0AForHiY
  explainiverse/explainers/gradient/tcav.py,sha256=zc-8wMsc2ZOhUeSZNBJ6H6BPXlVMJ9DRcAMiL25wU9I,32242
  explainiverse/explainers/rule_based/__init__.py,sha256=gKzlFCAzwurAMLJcuYgal4XhDj1thteBGcaHWmN7iWk,243
  explainiverse/explainers/rule_based/anchors_wrapper.py,sha256=ML7W6aam-eMGZHy5ilol8qupZvNBJpYAFatEEPnuMyo,13254
- explainiverse-0.8.1.dist-info/LICENSE,sha256=28rbHe8rJgmUlRdxJACfq1Sj-MtCEhyHxkJedQd1ZYA,1070
- explainiverse-0.8.1.dist-info/METADATA,sha256=A5NjgO7v2I-P8sjFW8yaJCd_VcU_Dqumq2iRdl0_Fbc,23770
- explainiverse-0.8.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- explainiverse-0.8.1.dist-info/RECORD,,
+ explainiverse-0.8.2.dist-info/LICENSE,sha256=28rbHe8rJgmUlRdxJACfq1Sj-MtCEhyHxkJedQd1ZYA,1070
+ explainiverse-0.8.2.dist-info/METADATA,sha256=QSLwIr4RmoHpxqIfoarJX17alA-0esXfdNa1cemWu5s,23770
+ explainiverse-0.8.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ explainiverse-0.8.2.dist-info/RECORD,,