scope_rx-2.0.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. scope_rx-2.0.0/LICENSE +21 -0
  2. scope_rx-2.0.0/PKG-INFO +361 -0
  3. scope_rx-2.0.0/README.md +297 -0
  4. scope_rx-2.0.0/pyproject.toml +173 -0
  5. scope_rx-2.0.0/scope_rx/__init__.py +178 -0
  6. scope_rx-2.0.0/scope_rx/cli.py +355 -0
  7. scope_rx-2.0.0/scope_rx/core/__init__.py +16 -0
  8. scope_rx-2.0.0/scope_rx/core/base.py +486 -0
  9. scope_rx-2.0.0/scope_rx/core/scope.py +349 -0
  10. scope_rx-2.0.0/scope_rx/core/wrapper.py +382 -0
  11. scope_rx-2.0.0/scope_rx/methods/__init__.py +60 -0
  12. scope_rx-2.0.0/scope_rx/methods/attention/__init__.py +18 -0
  13. scope_rx-2.0.0/scope_rx/methods/attention/flow.py +196 -0
  14. scope_rx-2.0.0/scope_rx/methods/attention/raw.py +165 -0
  15. scope_rx-2.0.0/scope_rx/methods/attention/rollout.py +235 -0
  16. scope_rx-2.0.0/scope_rx/methods/gradient/__init__.py +30 -0
  17. scope_rx-2.0.0/scope_rx/methods/gradient/gradcam.py +177 -0
  18. scope_rx-2.0.0/scope_rx/methods/gradient/gradcam_plusplus.py +170 -0
  19. scope_rx-2.0.0/scope_rx/methods/gradient/guided_backprop.py +133 -0
  20. scope_rx-2.0.0/scope_rx/methods/gradient/integrated_gradients.py +268 -0
  21. scope_rx-2.0.0/scope_rx/methods/gradient/layercam.py +141 -0
  22. scope_rx-2.0.0/scope_rx/methods/gradient/scorecam.py +201 -0
  23. scope_rx-2.0.0/scope_rx/methods/gradient/smoothgrad.py +170 -0
  24. scope_rx-2.0.0/scope_rx/methods/gradient/vanilla.py +113 -0
  25. scope_rx-2.0.0/scope_rx/methods/model_agnostic/__init__.py +15 -0
  26. scope_rx-2.0.0/scope_rx/methods/model_agnostic/kernel_shap.py +299 -0
  27. scope_rx-2.0.0/scope_rx/methods/model_agnostic/lime_explainer.py +250 -0
  28. scope_rx-2.0.0/scope_rx/methods/perturbation/__init__.py +18 -0
  29. scope_rx-2.0.0/scope_rx/methods/perturbation/meaningful_perturbation.py +217 -0
  30. scope_rx-2.0.0/scope_rx/methods/perturbation/occlusion.py +155 -0
  31. scope_rx-2.0.0/scope_rx/methods/perturbation/rise.py +194 -0
  32. scope_rx-2.0.0/scope_rx/metrics/__init__.py +42 -0
  33. scope_rx-2.0.0/scope_rx/metrics/faithfulness.py +271 -0
  34. scope_rx-2.0.0/scope_rx/metrics/sensitivity.py +114 -0
  35. scope_rx-2.0.0/scope_rx/metrics/stability.py +107 -0
  36. scope_rx-2.0.0/scope_rx/utils/__init__.py +38 -0
  37. scope_rx-2.0.0/scope_rx/utils/postprocessing.py +121 -0
  38. scope_rx-2.0.0/scope_rx/utils/preprocessing.py +150 -0
  39. scope_rx-2.0.0/scope_rx/utils/tensor.py +67 -0
  40. scope_rx-2.0.0/scope_rx/visualization/__init__.py +25 -0
  41. scope_rx-2.0.0/scope_rx/visualization/export.py +162 -0
  42. scope_rx-2.0.0/scope_rx/visualization/plots.py +248 -0
  43. scope_rx-2.0.0/scope_rx.egg-info/PKG-INFO +361 -0
  44. scope_rx-2.0.0/scope_rx.egg-info/SOURCES.txt +54 -0
  45. scope_rx-2.0.0/scope_rx.egg-info/dependency_links.txt +1 -0
  46. scope_rx-2.0.0/scope_rx.egg-info/entry_points.txt +2 -0
  47. scope_rx-2.0.0/scope_rx.egg-info/requires.txt +37 -0
  48. scope_rx-2.0.0/scope_rx.egg-info/top_level.txt +1 -0
  49. scope_rx-2.0.0/setup.cfg +4 -0
  50. scope_rx-2.0.0/tests/test_attention.py +106 -0
  51. scope_rx-2.0.0/tests/test_core.py +156 -0
  52. scope_rx-2.0.0/tests/test_gradient_methods.py +168 -0
  53. scope_rx-2.0.0/tests/test_metrics.py +131 -0
  54. scope_rx-2.0.0/tests/test_perturbation_methods.py +109 -0
  55. scope_rx-2.0.0/tests/test_utils_new.py +141 -0
  56. scope_rx-2.0.0/tests/test_visualization_new.py +88 -0
scope_rx-2.0.0/LICENSE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024-2026 ScopeRX Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
scope_rx-2.0.0/PKG-INFO ADDED
@@ -0,0 +1,361 @@
Metadata-Version: 2.4
Name: scope-rx
Version: 2.0.0
Summary: A comprehensive neural network explainability and interpretability library
Author-email: XCALEN <desenyon@gmail.com>
Maintainer-email: XCALEN <desenyon@gmail.com>
License: MIT
Project-URL: Homepage, https://github.com/xcalen/scope-rx
Project-URL: Documentation, https://scope-rx.readthedocs.io
Project-URL: Repository, https://github.com/xcalen/scope-rx
Project-URL: Issues, https://github.com/xcalen/scope-rx/issues
Project-URL: Changelog, https://github.com/xcalen/scope-rx/blob/main/CHANGELOG.md
Keywords: deep-learning,explainability,interpretability,neural-networks,pytorch,gradcam,integrated-gradients,shap,lime,attention,xai,machine-learning,computer-vision,transformers
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Scientific/Engineering :: Image Processing
Classifier: Topic :: Scientific/Engineering :: Visualization
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch>=1.10.0
Requires-Dist: numpy>=1.21.0
Requires-Dist: matplotlib>=3.5.0
Requires-Dist: opencv-python>=4.5.0
Requires-Dist: scipy>=1.7.0
Requires-Dist: pillow>=8.0.0
Requires-Dist: tqdm>=4.60.0
Requires-Dist: scikit-learn>=1.0.0
Requires-Dist: scikit-image>=0.19.0
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: isort>=5.12.0; extra == "dev"
Requires-Dist: flake8>=6.0.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"
Requires-Dist: pre-commit>=3.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"
Provides-Extra: interactive
Requires-Dist: plotly>=5.10.0; extra == "interactive"
Requires-Dist: ipywidgets>=8.0.0; extra == "interactive"
Requires-Dist: jupyter>=1.0.0; extra == "interactive"
Provides-Extra: full
Requires-Dist: shap>=0.42.0; extra == "full"
Requires-Dist: lime>=0.2.0; extra == "full"
Requires-Dist: captum>=0.6.0; extra == "full"
Requires-Dist: torchvision>=0.12.0; extra == "full"
Requires-Dist: transformers>=4.20.0; extra == "full"
Provides-Extra: docs
Requires-Dist: sphinx>=6.0.0; extra == "docs"
Requires-Dist: sphinx-rtd-theme>=1.2.0; extra == "docs"
Requires-Dist: sphinx-autodoc-typehints>=1.22.0; extra == "docs"
Requires-Dist: myst-parser>=1.0.0; extra == "docs"
Dynamic: license-file
(The long description embedded in PKG-INFO is identical to README.md, reproduced in full below.)
scope_rx-2.0.0/README.md ADDED
@@ -0,0 +1,297 @@
# ScopeRX

**Neural Network Explainability and Interpretability Library**

[![Python 3.9+](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![CI](https://github.com/xcalen/scope-rx/actions/workflows/ci.yml/badge.svg)](https://github.com/xcalen/scope-rx/actions/workflows/ci.yml) [![Codecov](https://codecov.io/gh/xcalen/scope-rx/branch/main/graph/badge.svg)](https://codecov.io/gh/xcalen/scope-rx)

ScopeRX is a comprehensive, production-grade Python library for explaining and interpreting neural network predictions. It provides state-of-the-art attribution methods, evaluation metrics, and visualization tools, all unified under a simple, intuitive API.

## Version 2.0.0 Updates

- **Type Safety**: Fully typed codebase with `mypy` compliance.
- **Production Grade**: Enhanced stability, error handling, and performance optimizations.
- **CI/CD**: Automated testing and linting pipelines.
- **Improved Methods**: Refactored `KernelSHAP`, `RISE`, and attention methods for better accuracy and speed.

## Features

- **15+ Explanation Methods**: From classic GradCAM to cutting-edge RISE and attention methods
- **Unified API**: One interface to rule them all; switch between methods with a single parameter
- **Evaluation Metrics**: Faithfulness, sensitivity, and stability metrics to quantify explanation quality
- **Beautiful Visualizations**: Publication-ready plots with minimal code
- **Model Agnostic**: Works with any PyTorch model architecture
- **Transformer Support**: Dedicated attention visualization for Vision Transformers
- **CLI Tool**: Generate explanations from the command line

## Installation

```bash
pip install scope-rx
```

**With optional dependencies:**

```bash
# For interactive Plotly visualizations
pip install scope-rx[interactive]

# For development
pip install scope-rx[dev]

# Full installation with all extras
pip install scope-rx[full]
```

## Quick Start

```python
import torch
import torchvision.models as models

from scope_rx import ScopeRX

# Load your model
model = models.resnet50(pretrained=True)
model.eval()

# Prepare a preprocessed input batch; a random tensor stands in for a real image here
input_tensor = torch.randn(1, 3, 224, 224)

# Use the model's top prediction as the explanation target
with torch.no_grad():
    predicted_class = model(input_tensor).argmax(dim=1).item()

# Create explainer
explainer = ScopeRX(model)

# Generate explanation
result = explainer.explain(
    input_tensor,
    method='gradcam',
    target_class=predicted_class
)

# Visualize
result.visualize()

# Or save to file
result.save("explanation.png")
```

## Available Methods

### Gradient-Based Methods

| Method                 | Description                                | Use Case                             |
| ---------------------- | ------------------------------------------ | ------------------------------------ |
| `gradcam`              | Gradient-weighted Class Activation Mapping | General CNN visualization            |
| `gradcam++`            | Improved GradCAM with better localization  | Multiple object instances            |
| `scorecam`             | Score-based CAM (gradient-free)            | When gradients are unstable          |
| `layercam`             | Layer-wise CAM                             | Fine-grained attribution             |
| `smoothgrad`           | Noise-smoothed gradients                   | Reducing gradient noise              |
| `integrated_gradients` | Axiomatic attribution method               | Theoretically grounded explanations  |
| `vanilla`              | Simple input gradients                     | Quick baseline                       |
| `guided_backprop`      | Guided backpropagation                     | High-resolution visualization        |

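For intuition, `gradcam` weights a convolutional layer's activation maps by the spatial average of the class score's gradients, then sums the weighted maps and applies a ReLU. Here is a minimal self-contained sketch of that computation in plain PyTorch (illustrative only, not ScopeRX's implementation; it assumes a torchvision ResNet-style model with a `layer4` block):

```python
import torch
import torch.nn.functional as F
import torchvision.models as models

model = models.resnet50().eval()
acts, grads = {}, {}

# Capture the target layer's activations on the forward pass
# and the gradients flowing back into it on the backward pass
layer = model.layer4
layer.register_forward_hook(lambda mod, inp, out: acts.update(a=out))
layer.register_full_backward_hook(lambda mod, gin, gout: grads.update(g=gout[0]))

x = torch.randn(1, 3, 224, 224, requires_grad=True)
score = model(x)[0].max()  # class score to explain (here: the top logit)
model.zero_grad()
score.backward()

# Channel weights = spatially averaged gradients; CAM = ReLU of the weighted sum
weights = grads["g"].mean(dim=(2, 3), keepdim=True)
cam = F.relu((weights * acts["a"]).sum(dim=1, keepdim=True))
cam = F.interpolate(cam, size=x.shape[-2:], mode="bilinear", align_corners=False)
cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)  # normalize to [0, 1]
```
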
### Perturbation-Based Methods

| Method                    | Description                    | Use Case                         |
| ------------------------- | ------------------------------ | -------------------------------- |
| `occlusion`               | Sliding window occlusion       | Understanding spatial importance |
| `rise`                    | Randomized Input Sampling      | Black-box models                 |
| `meaningful_perturbation` | Optimized minimal perturbation | Finding minimal explanations     |

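`rise` suits black-box models because it needs only forward passes: it scores many randomly masked copies of the input and averages the masks, weighted by the class probability each masked copy produces. A rough sketch of that idea, simplified from the original RISE recipe (no random mask shifting, and not ScopeRX's implementation; `model`, an input `x` of shape `(1, C, H, W)`, and a `target` class index are assumed):

```python
import torch
import torch.nn.functional as F

def rise_saliency(model, x, target, n_masks=500, grid=7, p_keep=0.5):
    _, _, h, w = x.shape
    # Low-resolution random binary grids, upsampled into smooth soft masks
    grids = (torch.rand(n_masks, 1, grid, grid) < p_keep).float()
    masks = F.interpolate(grids, size=(h, w), mode="bilinear", align_corners=False)
    saliency = torch.zeros(1, 1, h, w)
    with torch.no_grad():
        for mask in masks:  # one mask at a time keeps memory bounded
            prob = model(x * mask).softmax(dim=1)[0, target]
            saliency += prob * mask
    return saliency / (n_masks * p_keep)  # normalize by expected mask coverage
```
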
### Model-Agnostic Methods

| Method        | Description                      | Use Case                       |
| ------------- | -------------------------------- | ------------------------------ |
| `kernel_shap` | Kernel SHAP approximation        | Shapley value estimation       |
| `lime`        | Local Interpretable Explanations | Interpretable local surrogates |

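Both methods fit an interpretable surrogate to the model's behavior around a single input. The heart of a LIME-style explanation is a weighted linear regression over perturbed samples; a simplified sketch with scikit-learn (illustrative only, not the `lime` package's pipeline; the binary `samples` matrix over superpixels and the model outputs `probs` are assumed to be precomputed):

```python
import numpy as np
from sklearn.linear_model import Ridge

def lime_weights(samples, probs, kernel_width=0.25):
    """samples: (n, d) binary masks over d superpixels; probs: (n,) model outputs."""
    # Weight each perturbed sample by its similarity to the original (all-ones) input
    distances = 1.0 - samples.mean(axis=1)
    weights = np.exp(-(distances ** 2) / kernel_width ** 2)
    # Fit a weighted linear surrogate; its coefficients rank the superpixels
    surrogate = Ridge(alpha=1.0)
    surrogate.fit(samples, probs, sample_weight=weights)
    return surrogate.coef_
```
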
### Attention-Based Methods (for Transformers)

| Method              | Description                  | Use Case                      |
| ------------------- | ---------------------------- | ----------------------------- |
| `attention_rollout` | Attention weight aggregation | Vision Transformers           |
| `attention_flow`    | Attention flow propagation   | Understanding attention paths |
| `raw_attention`     | Raw attention weights        | Quick attention inspection    |

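`attention_rollout` aggregates attention across layers by adding the residual connection as an identity term, renormalizing the rows, and multiplying the per-layer matrices together. A compact sketch of that recurrence (not ScopeRX's implementation; `attentions` is assumed to be a list of head-averaged `(tokens, tokens)` matrices from a model with a leading CLS token):

```python
import torch

def attention_rollout(attentions):
    """attentions: list of (tokens, tokens) attention matrices, averaged over heads."""
    tokens = attentions[0].shape[-1]
    rollout = torch.eye(tokens)
    for attn in attentions:
        # Residual connection: half attention, half identity, rows renormalized
        attn = 0.5 * attn + 0.5 * torch.eye(tokens)
        attn = attn / attn.sum(dim=-1, keepdim=True)
        rollout = attn @ rollout
    # Row 0 (the CLS token) gives its accumulated attention over all input patches
    return rollout[0, 1:]
```
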
## Compare Methods

```python
from scope_rx import ScopeRX
from scope_rx.visualization import plot_comparison

explainer = ScopeRX(model)

# Compare multiple methods at once
results = explainer.compare_methods(
    input_tensor,
    methods=['gradcam', 'smoothgrad', 'integrated_gradients', 'rise'],
    target_class=predicted_class
)

# Visualize comparison
plot_comparison({name: r.attribution for name, r in results.items()})
```

## Evaluate Explanations

```python
from scope_rx.metrics import (
    faithfulness_score,
    insertion_deletion_auc,
    sensitivity_score,
    stability_score
)

# Faithfulness: Does the explanation reflect model behavior?
faith = faithfulness_score(model, input_tensor, attribution, target_class=0)

# Insertion/Deletion: How does model output change as we add/remove important features?
scores = insertion_deletion_auc(model, input_tensor, attribution, target_class=0)
print(f"Insertion AUC: {scores['insertion_auc']:.3f}")
print(f"Deletion AUC: {scores['deletion_auc']:.3f}")

# Sensitivity: Are explanations sensitive to meaningful changes?
sens = sensitivity_score(explainer, input_tensor, target_class=0)

# Stability: Are explanations stable across similar inputs?
stab = stability_score(explainer, input_tensor, target_class=0)
```
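
To make the insertion/deletion idea concrete: a deletion curve removes the highest-attribution pixels in chunks and tracks the target probability after each step; the faster the probability falls, the more faithful the map, so a lower deletion AUC is better. A simplified sketch of such a curve (illustrative only, not ScopeRX's metric; `model`, an input `x`, and a per-pixel `attribution` tensor are assumed):

```python
import torch

def deletion_auc(model, x, attribution, target, steps=20):
    """x: (1, C, H, W); attribution: (H, W). Lower AUC means a more faithful map."""
    order = attribution.flatten().argsort(descending=True)  # most important first
    chunk = len(order) // steps
    x_work = x.clone()
    probs = []
    with torch.no_grad():
        for i in range(steps + 1):
            probs.append(model(x_work).softmax(dim=1)[0, target].item())
            idx = order[i * chunk:(i + 1) * chunk]
            x_work.view(1, x.shape[1], -1)[..., idx] = 0.0  # erase the next chunk
    return sum(probs) / len(probs)  # simple Riemann estimate of the curve's area
```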

## Visualization

```python
from scope_rx.visualization import (
    plot_attribution,
    plot_comparison,
    overlay_attribution,
    create_interactive_plot,
    export_visualization
)

# Simple plot
plot_attribution(attribution, image=original_image)

# Interactive Plotly plot
fig = create_interactive_plot(attribution, image=original_image)
fig.show()

# Export to various formats
export_visualization(attribution, "output.png", colormap="jet")
export_visualization(attribution, "output.npy")  # Raw numpy array
```

## Command Line Interface

```bash
# Generate explanation
scope-rx explain image.jpg --model resnet50 --method gradcam --output heatmap.png

# Compare methods
scope-rx compare image.jpg --model resnet50 --methods gradcam,smoothgrad,rise

# List available methods
scope-rx list-methods

# Show model layers (for layer selection)
scope-rx show-layers --model resnet50
```

## Advanced Usage

### Custom Target Layers

```python
from scope_rx import GradCAM

# Specify exact layer
explainer = GradCAM(model, target_layer="layer4.1.conv2")
```
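
Layer strings follow PyTorch's dotted module naming, so valid names can also be discovered directly from the model, in addition to the CLI's `show-layers` command. For example, in plain PyTorch (this snippet is not part of ScopeRX's API):

```python
import torch.nn as nn

# Print the dotted name of every convolutional layer in the model;
# any of these names can be passed as target_layer
for name, module in model.named_modules():
    if isinstance(module, nn.Conv2d):
        print(name)  # e.g. "layer4.1.conv2" on a torchvision ResNet
```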

### Custom Baselines for Integrated Gradients

```python
from scope_rx import IntegratedGradients

# Use different baselines
explainer = IntegratedGradients(
    model,
    n_steps=50,
    baseline="blur"  # Options: "zero", "random", "blur"
)
```
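
Under the hood, Integrated Gradients attributes feature `i` as `(x_i - x'_i)` times the integral of the gradient along the straight-line path from the baseline `x'` to the input `x`, approximated with an `n_steps`-term Riemann sum. A bare-bones sketch of that approximation (illustrative only; ScopeRX's class layers the baseline choices and batching on top):

```python
import torch

def integrated_gradients(model, x, baseline, target, n_steps=50):
    """Riemann-sum approximation of the path integral from baseline to x."""
    total_grads = torch.zeros_like(x)
    for step in range(1, n_steps + 1):
        alpha = step / n_steps  # interpolation coefficient along the path
        point = (baseline + alpha * (x - baseline)).detach().requires_grad_(True)
        score = model(point)[0, target]
        total_grads += torch.autograd.grad(score, point)[0]
    return (x - baseline) * total_grads / n_steps
```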

### Batch Processing

```python
from pathlib import Path

from scope_rx import ScopeRX
from scope_rx.utils import preprocess_image

explainer = ScopeRX(model)

# Process multiple images
for image_path in image_paths:
    input_tensor = preprocess_image(image_path)
    result = explainer.explain(input_tensor, method='gradcam')
    result.save(f"explanations/{Path(image_path).stem}.png")
```

### Using Individual Explainers

```python
from scope_rx import GradCAM, SmoothGrad, RISE

# Use specific explainer directly
gradcam = GradCAM(model, target_layer="layer4")
result = gradcam.explain(input_tensor, target_class=0)

# SmoothGrad with custom parameters
smoothgrad = SmoothGrad(model, n_samples=50, noise_level=0.2)
result = smoothgrad.explain(input_tensor, target_class=0)
```
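
SmoothGrad itself is a small recipe: average vanilla input gradients over `n_samples` noisy copies of the input, with `noise_level` scaling the Gaussian noise relative to the input's value range. A minimal sketch of that averaging loop (not ScopeRX's implementation):

```python
import torch

def smoothgrad(model, x, target, n_samples=50, noise_level=0.2):
    """Average vanilla input gradients over Gaussian-perturbed copies of x."""
    sigma = noise_level * (x.max() - x.min())  # noise scaled to the input's range
    grads = torch.zeros_like(x)
    for _ in range(n_samples):
        noisy = (x + sigma * torch.randn_like(x)).detach().requires_grad_(True)
        score = model(noisy)[0, target]
        grads += torch.autograd.grad(score, noisy)[0]
    return grads / n_samples
```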

## Testing

```bash
# Run all tests
pytest tests/

# Run with coverage
pytest tests/ --cov=scope_rx --cov-report=html

# Run specific test module
pytest tests/test_gradient_methods.py -v
```

## Documentation

For full documentation, visit our [documentation site](https://scope-rx.readthedocs.io).

## Contributing

We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.

## License

This project is licensed under the MIT License; see the [LICENSE](LICENSE) file for details.

## Citation

If you use ScopeRX in your research, please cite:

```bibtex
@software{scoperx2024,
  title = {ScopeRX: Neural Network Explainability Library},
  author = {XCALEN},
  year = {2024},
  url = {https://github.com/xcalen/scope-rx}
}
```

## Acknowledgments

ScopeRX builds upon the excellent work of the interpretability research community. Special thanks to the authors of:

- GradCAM, GradCAM++, ScoreCAM, LayerCAM
- SHAP and LIME
- Integrated Gradients
- RISE
- And many others who have contributed to the field of explainable AI

---

**Made with love by Desenyon**