saliencytools 0.30__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2022-2024 six-two
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: saliencytools
3
+ Version: 0.30
4
+ Summary: A collection of metrics for comparing saliency maps
5
+ Home-page: https://github.com/valevalerio/saliencytools
6
+ Author: Valerio Bonsignori
7
+ Author-email: Valerio Bonsignori <valerio.bonsignori@phd.unipi.it>
8
+ License-Expression: MIT
9
+ Project-URL: Homepage, https://github.com/valevalerio/saliencytools
10
+ Project-URL: Issues, https://github.com/valevalerio/saliencytools/issues
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Operating System :: OS Independent
13
+ Requires-Python: >=3.10
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE
16
+ Dynamic: author
17
+ Dynamic: home-page
18
+ Dynamic: license-file
19
+ Dynamic: requires-python
20
+
21
+ # Saliency Metrics
22
+
23
+ ![Tests](https://github.com/valevalerio/saliencytools/actions/workflows/test.yml/badge.svg)
24
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
25
+ [![PyPI version](https://img.shields.io/pypi/v/saliencytools)](https://pypi.org/project/saliencytools/)
26
+ [![Documentation Status](https://img.shields.io/website?url=https://valevalerio.github.io/saliencytools/)](https://valevalerio.github.io/saliencytools/)
27
+
28
+
29
+
30
+ **Saliency Metrics** is a Python package that implements various metrics for comparing saliency maps generated by explanation methods. To ensure fair comparisons, metrics should be computed on the same saliency map and corresponding ground truth map.
31
+ The package includes the following metrics:
32
+ - **SSIM (Structural Similarity Index)**: A perceptual metric that quantifies the similarity between two images. It considers changes in structural information, luminance, and contrast.
33
+ - **PSNR (Peak Signal-to-Noise Ratio)**: A metric that measures the ratio between the maximum possible power of a signal and the power of corrupting noise. It is often used to assess the quality of reconstructed images.
34
+ - **EMD (Earth Mover's Distance)**: A metric that measures the distance between two probability distributions over a region D. It is often used in computer vision and image retrieval tasks.
35
+
36
+ ```tutorial.ipynb``` provides worked examples used to check and test the different metrics.
37
+ # Installation
38
+
39
+ ```pip install saliencytools```
40
+
41
+ ### This module is a work in progress and is not yet complete.
42
+
43
+ # Usage
44
+
45
+ ```python
46
+ from saliencytools import ssim, psnr, emd
47
+
48
+ import numpy as np
49
+ import matplotlib.pyplot as plt
50
+
51
+
52
+
53
+ # create a random saliency map
54
+ saliency_map = np.random.rand(28*28).reshape(28, 28)
55
+ # create a random ground truth map
56
+ ground_truth_map = np.random.rand(28*28).reshape(28, 28)
57
+ # create a random binary mask
58
+
59
+ # use all the metrics to compare the saliency map with the ground truth map
60
+ for metric in [ssim, psnr, emd]:
61
+
62
+ print(f"{metric.__name__}: {metric(saliency_map, ground_truth_map)}")
63
+
64
+
65
+ ```
@@ -0,0 +1,45 @@
1
+ # Saliency Metrics
2
+
3
+ ![Tests](https://github.com/valevalerio/saliencytools/actions/workflows/test.yml/badge.svg)
4
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
5
+ [![PyPI version](https://img.shields.io/pypi/v/saliencytools)](https://pypi.org/project/saliencytools/)
6
+ [![Documentation Status](https://img.shields.io/website?url=https://valevalerio.github.io/saliencytools/)](https://valevalerio.github.io/saliencytools/)
7
+
8
+
9
+
10
+ **Saliency Metrics** is a Python package that implements various metrics for comparing saliency maps generated by explanation methods. To ensure fair comparisons, metrics should be computed on the same saliency map and corresponding ground truth map.
11
+ The package includes the following metrics:
12
+ - **SSIM (Structural Similarity Index)**: A perceptual metric that quantifies the similarity between two images. It considers changes in structural information, luminance, and contrast.
13
+ - **PSNR (Peak Signal-to-Noise Ratio)**: A metric that measures the ratio between the maximum possible power of a signal and the power of corrupting noise. It is often used to assess the quality of reconstructed images.
14
+ - **EMD (Earth Mover's Distance)**: A metric that measures the distance between two probability distributions over a region D. It is often used in computer vision and image retrieval tasks.
15
+
16
+ ```tutorial.ipynb``` provides worked examples used to check and test the different metrics.
17
+ # Installation
18
+
19
+ ```pip install saliencytools```
20
+
21
+ ### This module is a work in progress and is not yet complete.
22
+
23
+ # Usage
24
+
25
+ ```python
26
+ from saliencytools import ssim, psnr, emd
27
+
28
+ import numpy as np
29
+ import matplotlib.pyplot as plt
30
+
31
+
32
+
33
+ # create a random saliency map
34
+ saliency_map = np.random.rand(28*28).reshape(28, 28)
35
+ # create a random ground truth map
36
+ ground_truth_map = np.random.rand(28*28).reshape(28, 28)
37
+ # create a random binary mask
38
+
39
+ # use all the metrics to compare the saliency map with the ground truth map
40
+ for metric in [ssim, psnr, emd]:
41
+
42
+ print(f"{metric.__name__}: {metric(saliency_map, ground_truth_map)}")
43
+
44
+
45
+ ```
@@ -0,0 +1,19 @@
1
+ [project]
2
+ name = "saliencytools"
3
+ version = "0.30"
4
+ authors = [
5
+ { name="Valerio Bonsignori", email="valerio.bonsignori@phd.unipi.it" },
6
+ ]
7
+ description = "A collection of metrics for comparing saliency maps"
8
+ readme = "README.md"
9
+ requires-python = ">=3.10"
10
+ classifiers = [
11
+ "Programming Language :: Python :: 3",
12
+ "Operating System :: OS Independent",
13
+ ]
14
+ license = "MIT"
15
+ license-files = ["LICEN[CS]E*"]
16
+
17
+ [project.urls]
18
+ Homepage = "https://github.com/valevalerio/saliencytools"
19
+ Issues = "https://github.com/valevalerio/saliencytools/issues"
File without changes
@@ -0,0 +1,413 @@
1
+ """
2
+ This module contains functions to compare different saliency maps using various metrics.
3
+
4
+ Metrics implemented:
5
+ - ResNet Feature Similarity (exclude the last layer, compare the extracted features) — planned; not implemented in this module
6
+ - ShapGap Cosine
7
+ - ShapGap L2
8
+ - Earth Mover's Distance (EMD)
9
+ - Mean Absolute Error (MAE)
10
+ - Sign Agreement Ratio (SAR)
11
+ - Sign Distance
12
+ - Intersection over Union (IoU)
13
+ - Correlation Distance
14
+ - Mean Squared Error (MSE)
15
+ - Peak Signal-to-Noise Ratio (PSNR)
16
+ - Czekanowski Distance
17
+ - Jaccard Index
18
+ - Jaccard Distance
19
+ - Structural Similarity Index Measure (SSIM)
20
+ """
21
+
22
+ import numpy as np
23
+ import torch.nn.functional as F
24
+ import torch
25
+ from skimage import metrics
26
+ from scipy.stats import wasserstein_distance
27
+
28
+ # Normalization functions
29
def normalize_mask(mask):
    """
    Rescale a saliency map to the range [-1, 1].

    The map is shifted so its minimum becomes 0, divided by its value
    range, then mapped linearly onto [-1, 1]. This standardizes inputs
    for metrics or models that expect values in this range.

    Parameters:
        mask (numpy.ndarray): Input saliency map, a 2D or 3D array of
                              saliency values.

    Returns:
        numpy.ndarray: Normalized saliency map with values in [-1, 1].
                       A constant-valued map (zero value range) is
                       returned as all zeros instead of triggering a
                       division by zero (which previously produced NaNs).
    """
    mask = np.asarray(mask, dtype=float)
    value_range = np.max(mask) - np.min(mask)
    if value_range == 0:
        # A constant map carries no saliency information; map it to 0,
        # the midpoint of the target range.
        return np.zeros_like(mask)
    mask = (mask - np.min(mask)) / value_range
    return 2 * mask - 1
49
+
50
def normalize_mask_0_1(mask):
    """
    Rescale a saliency map to the range [0, 1].

    The map is shifted so its minimum becomes 0 and divided by its value
    range, so the output always spans [0, 1]. Useful for standardizing
    inputs to metrics that require normalized values.

    Parameters:
        mask (numpy.ndarray): Input saliency map, a 2D or 3D array of
                              saliency values where higher values
                              indicate greater importance.

    Returns:
        numpy.ndarray: Normalized saliency map with values in [0, 1],
                       same shape as the input. A constant-valued map
                       (zero value range) is returned as all zeros
                       instead of triggering a division by zero.
    """
    mask = np.asarray(mask, dtype=float)
    lo, hi = np.min(mask), np.max(mask)
    if hi == lo:
        # A constant map carries no saliency information; map it to 0.
        return np.zeros_like(mask)
    return (mask - lo) / (hi - lo)
71
+
72
def clip_mask(mask):
    """
    Constrain a saliency map to the range [-1, 1].

    Values below -1 are raised to -1 and values above 1 are lowered to 1,
    preventing outliers or extreme values from affecting downstream
    computations.

    Parameters:
        mask (numpy.ndarray): Input saliency map, a 2D or 3D array of
                              saliency values.

    Returns:
        numpy.ndarray: Clipped saliency map with values in [-1, 1].
    """
    lower_bound, upper_bound = -1, 1
    return np.clip(mask, lower_bound, upper_bound)
88
+
89
+ # Distance metrics
90
def euclidean_distance(a, b):
    """
    Compute the Euclidean (Frobenius) distance between two images.

    Measures the straight-line distance between corresponding pixels,
    capturing the overall magnitude of differences between the images.

    Reference:
        Commonly used in image processing and computer vision literature,
        also known as the Frobenius norm of the difference.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Euclidean distance, representing the magnitude of
               differences.
    """
    difference = np.ravel(np.asarray(a) - np.asarray(b))
    return np.sqrt(np.dot(difference, difference))
109
+
110
def cosine_distance(a, b):
    """
    Compute the cosine distance (1 - cosine similarity) between two images.

    Both inputs are flattened to vectors; the metric reflects the angular
    difference between them, comparing orientation rather than magnitude.

    NOTE(review): an all-zero input yields a 0/0 division (NaN) — callers
    are expected to pass non-degenerate maps; confirm intended handling.

    Reference:
        Commonly used in vector similarity and machine learning literature.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Cosine distance, representing the angular difference.
    """
    vec_a = a.flatten()
    vec_b = b.flatten()
    similarity = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
    return 1 - similarity
132
+
133
def emd(a, b):
    """
    Compute the Earth Mover's Distance (EMD) between two images.

    Both inputs are flattened and compared as 1-D value distributions
    via the Wasserstein distance — the minimum cost of transforming one
    distribution into the other.

    Reference:
        Y. Rubner, C. Tomasi & L. J. Guibas (2000) "The earth mover's
        distance as a metric for image retrieval."

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Earth Mover's Distance, representing the cost of
               transformation.
    """
    values_a = np.ravel(a)
    values_b = np.ravel(b)
    return wasserstein_distance(values_a, values_b)
152
+
153
def mean_absolute_error(a, b):
    """
    Compute the Mean Absolute Error (MAE) between two images.

    Averages the absolute pixel-wise differences, capturing the overall
    deviation in pixel values.

    Reference:
        Commonly used in regression analysis and image processing.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Mean Absolute Error, representing the average deviation.
    """
    absolute_deviation = np.abs(np.asarray(a) - np.asarray(b))
    return absolute_deviation.mean()
171
+
172
def sign_agreement_ratio(a, b):
    """
    Compute the sign DISAGREEMENT between two images.

    Despite the name, this returns 1 minus the proportion of pixels whose
    signs agree — i.e. the fraction of pixels whose signs differ, which is
    numerically the same quantity as ``sign_distance``. It is 0 when the
    signs match everywhere and 1 when they are opposite everywhere, so it
    behaves as a distance (like the module's other metrics).

    Reference:
        A. M. Nevill, G. Atkinson (1997) "Assessing agreement between
        measurements recorded on a ratio scale" in sports medicine and
        sports science.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Proportion of pixels whose signs disagree, in [0, 1].
    """
    a = a.flatten()
    b = b.flatten()
    # np.sign maps values to {-1, 0, +1}; the mean of the boolean equality
    # array is the agreement proportion, subtracted from 1.
    return 1 - np.mean(np.sign(a) == np.sign(b))
195
+
196
def sign_distance(a, b):
    """
    Compute the Sign Distance between two images.

    Measures the proportion of pixels whose signs differ between the two
    images — the complement of the sign agreement proportion — capturing
    inconsistency in the direction of importance.

    Reference:
        Metric commonly used in image processing and computer vision.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Sign Distance, the proportion of disagreement in [0, 1].
    """
    signs_a = np.sign(a.flatten())
    signs_b = np.sign(b.flatten())
    disagreement = signs_a != signs_b
    return np.mean(disagreement)
218
+
219
def intersection_over_union(a, b):
    """
    Compute an IoU-based distance between two images.

    Returns 1 - IoU, where IoU is the sum of element-wise minima divided
    by the sum of element-wise maxima (a "soft" IoU suitable for
    real-valued saliency maps). 0 means identical maps; larger values
    mean less overlap.

    Reference:
        Commonly used in object detection and segmentation literature.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: 1 - IoU. Two all-zero maps are treated as identical
               (distance 0) instead of producing a division by zero,
               mirroring the guard in ``jaccard_index``.
    """
    a = a.flatten()
    b = b.flatten()
    intersection = np.sum(np.minimum(a, b))
    union = np.sum(np.maximum(a, b))
    if union == 0:
        return 0  # both maps all zeros: identical
    return 1 - intersection / union
242
+
243
def correlation_distance(a, b):
    """
    Compute the Correlation Distance between two images.

    Returns 1 minus the Pearson correlation coefficient of the flattened
    images, capturing how well variations in one image track the other
    (0 for perfectly correlated, 2 for perfectly anti-correlated).

    Reference:
        Commonly used in statistics and signal processing.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Correlation Distance, the inverse of correlation.
    """
    flat_a = np.ravel(a)
    flat_b = np.ravel(b)
    pearson = np.corrcoef(flat_a, flat_b)[0, 1]
    return 1 - pearson
264
+
265
def mean_squared_error(a, b):
    """
    Compute the Mean Squared Error (MSE) between two images.

    Averages the squared pixel-wise differences, so large deviations are
    penalized more heavily than in the Mean Absolute Error.

    Reference:
        Commonly used in regression analysis and image processing.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Mean Squared Error, the average squared deviation.
    """
    squared_deviation = np.square(np.asarray(a) - np.asarray(b))
    return squared_deviation.mean()
284
+
285
def ssim(a, b):
    """
    Compute an SSIM-based dissimilarity between two images.

    Despite the name, this does NOT return the raw SSIM score: it returns
    (1 - SSIM) / 2, mapping SSIM's [-1, 1] similarity range onto a [0, 1]
    distance where 0 means structurally identical — consistent with the
    other distance metrics in this module. The data range passed to
    skimage is the joint value range of both inputs.

    NOTE(review): two identical constant images give data_range == 0,
    which skimage may reject — confirm inputs are non-constant.

    Reference:
        Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004).
        "Image quality assessment: From error visibility to structural similarity."

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: (1 - SSIM) / 2, a dissimilarity in [0, 1].
    """
    return (1 - metrics.structural_similarity(a, b, full=False,
        data_range=np.maximum(a.max(), b.max()) - np.minimum(a.min(), b.min()))) / 2
306
+
307
def psnr(a, b):
    """
    Compute the Peak Signal-to-Noise Ratio (PSNR) between two images.

    Measures the ratio between the peak value range and the mean squared
    error. Note that unlike most metrics in this module, PSNR is a
    SIMILARITY score (higher = more similar), not a distance. The data
    range passed to skimage is the joint value range of both inputs.

    Reference:
        Huynh-Thu, Q., & Ghanbari, M. (2008).
        "Scope of validity of PSNR in image/video quality assessment."

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: PSNR in decibels; infinite when the two images are
               identical (zero mean squared error).
    """
    return metrics.peak_signal_noise_ratio(a, b,
        data_range=np.maximum(a.max(), b.max()) - np.minimum(a.min(), b.min()))
328
+
329
def czenakowski_distance(a, b):
    """
    Compute the Czekanowski (Sørensen) Distance between two images.

    Measures dissimilarity from the ratio of twice the sum of element-wise
    minima to the total sum of both images — useful for comparing
    distributions with overlapping regions. (The function name keeps the
    original "czenakowski" spelling for backward compatibility.)

    Reference:
        T. SORENSEN (1948) "A method of establishing groups of equal amplitude in plant sociology based on similarity of species content and its application to analyses of the vegetation on danish commons." Biologiske Skrifter.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Czekanowski Distance; 0 when the images are identical
               (including the all-zeros case).
    """
    overlap = np.sum(np.minimum(a, b))
    total = np.sum(a + b)
    if total == 0:
        # Both images all zeros — identical by definition.
        return 0
    return 1 - (2 * overlap) / total
352
+
353
def jaccard_index(a, b):
    """
    Compute the Jaccard Index between two images.

    A "soft" Jaccard similarity: the sum of element-wise minima divided
    by the sum of element-wise maxima of the flattened images. Commonly
    used for evaluating binary or thresholded saliency maps.

    Reference:
        Commonly used in set theory and image segmentation literature.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Jaccard Index, the similarity ratio; 0 when both images
               are all zeros (avoids division by zero).
    """
    flat_a = a.flatten()
    flat_b = b.flatten()
    overlap = np.sum(np.minimum(flat_a, flat_b))
    combined = np.sum(np.maximum(flat_a, flat_b))
    if combined == 0:
        return 0  # both images all zeros
    return overlap / combined
378
+
379
def jaccard_distance(a, b):
    """
    Compute the Jaccard Distance between two images.

    The complement of the Jaccard Index: 0 means identical maps, 1 means
    fully disjoint. Useful for evaluating differences between binary or
    thresholded saliency maps.

    Reference:
        Commonly used in set theory and image segmentation literature.

    Parameters:
        a (numpy.ndarray): First image.
        b (numpy.ndarray): Second image.

    Returns:
        float: Jaccard Distance, the dissimilarity ratio.
    """
    similarity = jaccard_index(a, b)
    return 1 - similarity
398
+
399
# Assign readable names to metrics.
# These overrides replace each function's __name__ with a human-readable
# display label (some with LaTeX math for plot legends), so loops like the
# README's `print(f"{metric.__name__}: ...")` show nice titles.
# NOTE(review): test_metrics compares __name__ against lowercase
# "ssim"/"psnr", which can never match the reassigned "SSIM"/"PSNR"
# labels below — confirm which spelling that test should use.
cosine_distance.__name__ = "$ShapGap_{Cosine}$"
euclidean_distance.__name__ = "$ShapGap_{L2}$"
emd.__name__ = "Earth Mover's Distance"
mean_absolute_error.__name__ = "MAE"
sign_agreement_ratio.__name__ = "Sign Agreement Ratio"
sign_distance.__name__ = "Sign Distance"
intersection_over_union.__name__ = "Intersection over Union"
correlation_distance.__name__ = "Correlation Distance"
mean_squared_error.__name__ = "MSE"
ssim.__name__ = "SSIM"
psnr.__name__ = "PSNR"
czenakowski_distance.__name__ = "Czekanowski Distance"
jaccard_distance.__name__ = "Jaccard Distance"
jaccard_index.__name__ = "Jaccard Index"
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: saliencytools
3
+ Version: 0.30
4
+ Summary: A collection of metrics for comparing saliency maps
5
+ Home-page: https://github.com/valevalerio/saliencytools
6
+ Author: Valerio Bonsignori
7
+ Author-email: Valerio Bonsignori <valerio.bonsignori@phd.unipi.it>
8
+ License-Expression: MIT
9
+ Project-URL: Homepage, https://github.com/valevalerio/saliencytools
10
+ Project-URL: Issues, https://github.com/valevalerio/saliencytools/issues
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Operating System :: OS Independent
13
+ Requires-Python: >=3.10
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE
16
+ Dynamic: author
17
+ Dynamic: home-page
18
+ Dynamic: license-file
19
+ Dynamic: requires-python
20
+
21
+ # Saliency Metrics
22
+
23
+ ![Tests](https://github.com/valevalerio/saliencytools/actions/workflows/test.yml/badge.svg)
24
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
25
+ [![PyPI version](https://img.shields.io/pypi/v/saliencytools)](https://pypi.org/project/saliencytools/)
26
+ [![Documentation Status](https://img.shields.io/website?url=https://valevalerio.github.io/saliencytools/)](https://valevalerio.github.io/saliencytools/)
27
+
28
+
29
+
30
+ **Saliency Metrics** is a Python package that implements various metrics for comparing saliency maps generated by explanation methods. To ensure fair comparisons, metrics should be computed on the same saliency map and corresponding ground truth map.
31
+ The package includes the following metrics:
32
+ - **SSIM (Structural Similarity Index)**: A perceptual metric that quantifies the similarity between two images. It considers changes in structural information, luminance, and contrast.
33
+ - **PSNR (Peak Signal-to-Noise Ratio)**: A metric that measures the ratio between the maximum possible power of a signal and the power of corrupting noise. It is often used to assess the quality of reconstructed images.
34
+ - **EMD (Earth Mover's Distance)**: A metric that measures the distance between two probability distributions over a region D. It is often used in computer vision and image retrieval tasks.
35
+
36
+ ```tutorial.ipynb``` provides worked examples used to check and test the different metrics.
37
+ # Installation
38
+
39
+ ```pip install saliencytools```
40
+
41
+ ### This module is a work in progress and is not yet complete.
42
+
43
+ # Usage
44
+
45
+ ```python
46
+ from saliencytools import ssim, psnr, emd
47
+
48
+ import numpy as np
49
+ import matplotlib.pyplot as plt
50
+
51
+
52
+
53
+ # create a random saliency map
54
+ saliency_map = np.random.rand(28*28).reshape(28, 28)
55
+ # create a random ground truth map
56
+ ground_truth_map = np.random.rand(28*28).reshape(28, 28)
57
+ # create a random binary mask
58
+
59
+ # use all the metrics to compare the saliency map with the ground truth map
60
+ for metric in [ssim, psnr, emd]:
61
+
62
+ print(f"{metric.__name__}: {metric(saliency_map, ground_truth_map)}")
63
+
64
+
65
+ ```
@@ -0,0 +1,13 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ setup.py
5
+ saliencytools/__init__.py
6
+ saliencytools/maskcompare.py
7
+ saliencytools.egg-info/PKG-INFO
8
+ saliencytools.egg-info/SOURCES.txt
9
+ saliencytools.egg-info/dependency_links.txt
10
+ saliencytools.egg-info/top_level.txt
11
+ test/__init__.py
12
+ test/test_metrics.py
13
+ test/test_readme.py
@@ -0,0 +1,2 @@
1
+ saliencytools
2
+ test
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,30 @@
1
from setuptools import setup, find_packages

# Read the long description with an explicit encoding so the build does
# not depend on the platform default, and close the file deterministically.
with open("README.md", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name="saliencytools",
    # Keep in sync with pyproject.toml ([project] version = "0.30");
    # the previous "0.1.0" contradicted the packaged metadata.
    version="0.30",
    author="Valerio Bonsignori",
    author_email="valerio.bonsignori@phd.unipi.it",
    description="A collection of metrics to compare saliency maps, validated using KNN-like classifiers on MNIST.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/valevalerio/saliencytools",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.10",
    install_requires=[
        "numpy==1.24.4",
        "pandas==1.5.3",
        "matplotlib==3.7.5",
        "seaborn==0.13.2",
        "scikit-learn==1.3.2",
        "scikit-multilearn==0.2.0",
        "scipy==1.10.1",
        "torch",
        # The module does `from skimage import metrics`, which is provided
        # by scikit-image; "sklearn-image" was a typo'd/wrong distribution.
        "scikit-image",
    ],
)
File without changes
@@ -0,0 +1,61 @@
1
+ """ This Module is used to check the coherency of the metrics implemented in the project."""
2
+
3
+ from saliencytools import (
4
+ normalize_mask_0_1,
5
+ clip_mask,
6
+ euclidean_distance,
7
+ cosine_distance,
8
+ emd,
9
+ mean_absolute_error,
10
+ sign_agreement_ratio,
11
+ sign_distance,
12
+ intersection_over_union,
13
+ correlation_distance,
14
+ mean_squared_error,
15
+ ssim,
16
+ psnr,
17
+ czenakowski_distance,
18
+ jaccard_distance)
19
+ import numpy as np
20
def test_metrics():
    """
    Sanity-check symmetry, non-negativity, and identity-of-indiscernibles
    for every distance metric on random saliency maps.
    """
    test_map_1 = np.random.rand(28 * 28).reshape(28, 28)
    test_map_2 = np.random.rand(28 * 28).reshape(28, 28)

    # Clip the maps
    test_map_1 = clip_mask(test_map_1)
    test_map_2 = clip_mask(test_map_2)
    # Normalize the maps
    test_map_1 = normalize_mask_0_1(test_map_1)
    test_map_2 = normalize_mask_0_1(test_map_2)

    # Check if the maps are not empty
    assert test_map_1.size > 0, "test_map_1 is empty"
    assert test_map_2.size > 0, "test_map_2 is empty"

    for metric in [
        euclidean_distance,
        cosine_distance,
        emd,
        mean_absolute_error,
        sign_agreement_ratio,
        sign_distance,
        intersection_over_union,
        correlation_distance,
        mean_squared_error,
        ssim,
        psnr,
        czenakowski_distance,
        jaccard_distance
    ]:
        # Check if the metric is symmetric
        assert metric(test_map_1, test_map_2) == metric(test_map_2, test_map_1), f"{metric.__name__} is not symmetric"
        # Check if the metric is non-negative
        assert metric(test_map_1, test_map_2) >= 0, f"{metric.__name__} is negative"
        # Skip the identity check for SSIM and PSNR (PSNR is infinite,
        # not zero, for identical inputs). The metrics' __name__ values
        # are reassigned to display labels in saliencytools, so the
        # comparison must use "SSIM"/"PSNR" — the previous check compared
        # against lowercase "ssim"/"psnr" with an inverted condition,
        # which skipped the identity assertion for EVERY metric.
        if metric.__name__ in ("SSIM", "PSNR"):
            continue
        # Check if the metric is zero when both maps are identical
        assert np.allclose(metric(test_map_1, test_map_1), 0), f"{metric.__name__} is not zero when maps are identical"
@@ -0,0 +1,19 @@
1
+ from saliencytools import ssim, psnr, emd
2
+
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+
6
+
7
def test_readme():
    """
    Run the example code from the README and verify each metric yields a
    finite, formattable result. The previous version built an f-string
    and discarded it, so the test asserted nothing about the outputs.
    """
    # create a random saliency map
    saliency_map = np.random.rand(28 * 28).reshape(28, 28)
    # create a random ground truth map
    ground_truth_map = np.random.rand(28 * 28).reshape(28, 28)

    # use all the metrics to compare the saliency map with the ground truth map
    for metric in [ssim, psnr, emd]:
        value = metric(saliency_map, ground_truth_map)
        # For random (non-identical) maps every metric should be finite.
        assert np.isfinite(value), f"{metric.__name__} returned a non-finite value"
        # The README formats this string; make sure it renders.
        assert f"{metric.__name__}: {value}", "formatted metric output is empty"