signal-grad-cam 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of signal-grad-cam might be problematic.
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/PKG-INFO +1 -1
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/setup.py +1 -1
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/cam_builder.py +29 -12
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/pytorch_cam_builder.py +35 -23
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/tensorflow_cam_builder.py +37 -24
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/PKG-INFO +1 -1
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/LICENSE +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/README.md +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/pyproject.toml +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/setup.cfg +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/__init__.py +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/SOURCES.txt +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/dependency_links.txt +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/not-zip-safe +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/requires.txt +0 -0
- {signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/top_level.txt +0 -0
{signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: signal_grad_cam
-Version: 0.1.2
+Version: 0.1.4
 Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
 Home-page: https://github.com/samuelepe11/signal_grad_cam
 Author: Samuele Pe
{signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as f:

 setup(
     name="signal_grad_cam",
-    version="0.1.2",
+    version="0.1.4",
     description="SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability"
                 " and efficiency.",
     keywords="XAI, class activation maps, CNN, time series",
{signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/cam_builder.py
@@ -23,8 +23,8 @@ class CamBuilder:
     def __init__(self, model: torch.nn.Module | tf.keras.Model | Any,
                  transform_fn: Callable[[np.ndarray, *tuple[Any, ...]], torch.Tensor | tf.Tensor] = None,
                  class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
-                 ignore_channel_dim: bool = False,
-                 padding_dim: int = None, seed: int = 11):
+                 ignore_channel_dim: bool = False, is_regression_network: bool = False, model_output_index: int = None,
+                 extend_search: bool = False, padding_dim: int = None, seed: int = 11):
         """
         Initializes the CamBuilder class. The constructor also displays, if present and retrievable, the 1D- and
         2D-convolutional layers in the network, as well as the final Sigmoid/Softmax activation. Additionally, the CAM
@@ -44,6 +44,10 @@ class CamBuilder:
             during model inference, either by the model itself or by the preprocessing function.
         :param ignore_channel_dim: (optional, default is False) A boolean indicating whether to ignore the channel
             dimension. This is useful when the model expects inputs without a singleton channel dimension.
+        :param is_regression_network: (optional, default is False) A boolean indicating whether the network is designed
+            for a regression task. If set to True, the CAM will highlight both positive and negative contributions.
+            While negative contributions are typically irrelevant for classification-based saliency maps, they can be
+            meaningful in regression settings, as they may represent features that decrease the predicted value.
         :param model_output_index: (optional, default is None) An integer index specifying which of the model's outputs
             represents output scores (or probabilities). If there is only one output, this argument can be ignored.
         :param extend_search: (optional, default is False) A boolean flag indicating whether to deepend the search for
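
In practice, the `is_regression_network` flag boils down to whether the final ReLU is applied to the weighted activation sum (see the builder hunks further below). A minimal NumPy sketch of the difference, with made-up weights and activations, not taken from the package:

```python
import numpy as np

# Made-up channel weights (pooled gradients) and activations of a 1D layer
weights = np.array([0.8, -0.5])                  # one weight per channel
activations = np.array([[0.2, 1.0, 0.1, 0.0],    # channel 0
                        [0.0, 0.3, 0.9, 0.4]])   # channel 1

cam = (weights[:, None] * activations).sum(axis=0)

cam_classification = np.maximum(cam, 0)  # classification CAM: negative evidence clipped away
cam_regression = cam                     # regression CAM: negative values kept, marking
                                         # time steps that lower the predicted value

print(cam_classification)   # non-negative map
print(cam_regression)       # signed map
```
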
@@ -67,6 +71,7 @@ class CamBuilder:
         self.time_axs = time_axs
         self.input_transposed = input_transposed
         self.ignore_channel_dim = ignore_channel_dim
+        self.is_regression_network = is_regression_network
         self.model_output_index = model_output_index
         self.padding_dim = padding_dim
         self.original_dims = []
@@ -105,7 +110,7 @@ class CamBuilder:
                 channel_names: List[str | float] = None, results_dir_path: str = None, aspect_factor: float = 100,
                 data_shape_list: List[Tuple[int, int]] = None, extra_preprocess_inputs_list: List[List[Any]] = None,
                 extra_inputs_list: List[Any] = None, time_names: List[str | float] = None,
-                axes_names: Tuple[str | None, str | None] | List[str | None] = None) \
+                axes_names: Tuple[str | None, str | None] | List[str | None] = None, eps: float = 1e-6) \
             -> Tuple[Dict[str, List[np.ndarray]], Dict[str, np.ndarray], Dict[str, Tuple[np.ndarray, np.ndarray]]]:
         """
         Allows the user to request Class Activation Maps (CAMs) for a given list of inputs, a set of algorithms,
@@ -136,7 +141,8 @@ class CamBuilder:
         :param results_dir_path: (optional, default is None) A string representing the relative path to the directory
             for storing results. If None, the output will be displayed in a figure.
         :param aspect_factor: (optional, default is 100) A numerical value to set the aspect ratio of the output signal
-            one-dimensional CAM.
+            one-dimensional CAM. Note that this value should be grater than the length of the input signal considered,
+            otherwise it is set to the length of the considered signal.
         :param data_shape_list: (optional, default is None) A list of integer tuples storing the original input sizes,
             used to set the CAM shape after resizing during preprocessing. The expected format is number of rows x
             number of columns.
@@ -147,6 +153,8 @@ class CamBuilder:
         :param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
         :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
             respectively.
+        :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
+            to avoid null or None results.

         :return:
             - cams_dict: A dictionary storing a list of CAMs. Each list contains CAMs for each item in the input data
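
The new `eps` argument exists because recovering scores from probabilities involves a logarithm (or logit), which is not finite at exactly 0 or 1. A small standalone PyTorch check of what the clamping prevents (illustrative, not part of the package code):

```python
import torch

eps = 1e-6
probs = torch.tensor([0.0, 0.3, 1.0])

print(torch.log(probs))                                      # contains -inf -> NaNs in the gradients
print(torch.log(torch.clamp(probs, min=eps, max=1 - eps)))   # finite scores
print(torch.logit(probs, eps=eps))                           # finite logits for the binary case
```
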
@@ -183,7 +191,8 @@ class CamBuilder:
                                                            data_shape_list=data_shape_list,
                                                            extra_preprocess_inputs_list=
                                                            extra_preprocess_inputs_list,
-                                                           extra_inputs_list=extra_inputs_list
+                                                           extra_inputs_list=extra_inputs_list,
+                                                           esp=eps)
                 item_key = explainer_type + "_" + target_layer + "_class" + str(target_class)
                 cams_dict.update({item_key: cam_list})
                 predicted_probs_dict.update({item_key: output_probs})
@@ -450,7 +459,8 @@ class CamBuilder:

     def _create_raw_batched_cams(self, data_list: List[np.ndarray | torch.Tensor | tf.Tensor], target_class: int,
                                  target_layer: str, explainer_type: str, softmax_final: bool,
-                                 extra_inputs_list: List[Any] = None
+                                 extra_inputs_list: List[Any] = None, eps: float = 1e-6) \
+            -> Tuple[List[np.ndarray], np.ndarray]:
         """
         Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
         and target class). Additionally, it returns the class probabilities predicted by the model.
@@ -466,6 +476,8 @@ class CamBuilder:
             activation function.
         :param extra_inputs_list: (optional, defaults is None) A list of additional input objects required by the
             model's forward method.
+        :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
+            to avoid null or None results.

         :return:
             - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
@@ -511,7 +523,8 @@ class CamBuilder:

     def __create_batched_cams(self, data_list: List[np.ndarray], target_class: int, target_layer: str,
                               explainer_type: str, softmax_final: bool, data_shape_list: List[Tuple[int, int]] = None,
-                              extra_preprocess_inputs_list: List[List[Any]] = None, extra_inputs_list: List[Any] = None
+                              extra_preprocess_inputs_list: List[List[Any]] = None, extra_inputs_list: List[Any] = None,
+                              eps: float = 1e-6) \
             -> Tuple[List[np.ndarray], np.ndarray, Tuple[np.ndarray, np.ndarray]]:
         """
         Prepares the input data list and retrieves CAMs based on the specified settings (defined by algorithm, target
@@ -534,7 +547,9 @@ class CamBuilder:
             represents the additional input objects required by the preprocessing method for the i-th input.
         :param extra_inputs_list: (optional, default is None) A list of additional input objects required by the model's
             forward method.
-
+        :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
+            to avoid null or None results.
+
         :return:
             - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
                 given setting (defined by algorithm, target layer, and target class).
@@ -568,7 +583,8 @@ class CamBuilder:
                             "time.")

         cam_list, target_probs = self._create_raw_batched_cams(data_list, target_class, target_layer, explainer_type,
-                                                                softmax_final, extra_inputs_list=extra_inputs_list
+                                                                softmax_final, extra_inputs_list=extra_inputs_list,
+                                                                eps=eps)
         self.activations = None
         self.gradients = None
         cams = np.stack(cam_list)
@@ -648,7 +664,8 @@ class CamBuilder:
         :param dt: (optional, default is 10) A numerical value representing the granularity of the time axis in seconds
             in the output display.
         :param aspect_factor: (optional, default is 100) A numerical value to set the aspect ratio of the output signal
-            one-dimensional CAM.
+            one-dimensional CAM. Note that this value should be grater than the length of the input signal considered,
+            otherwise it is set to the length of the considered signal.
         :param bar_ranges: A tuple containing two np.ndarrays, corresponding to the minimum and maximum importance scores
             per CAM for each item in the input data list, based on a given setting (defined by algorithm, target
             layer, and target class).
@@ -674,7 +691,7 @@ class CamBuilder:
             norm = self.__get_norm(map)

             if map.shape[1] == 1:
-                aspect = int(map.shape[0] / aspect_factor)
+                aspect = int(map.shape[0] / aspect_factor) if map.shape[0] <= aspect_factor else 1
                 map = np.transpose(map)
             else:
                 if is_2d_layer:
@@ -890,7 +907,7 @@ class CamBuilder:
             - is_2d_layer: A boolean indicating whether the target layers 2D-convolutional layer.
         """

-        raise ValueError(target_layer + " must be a 1D or 2D convolutional layer.")
+        raise ValueError(str(target_layer) + " must be a 1D or 2D convolutional layer.")

     @staticmethod
     def __normalize_cams(cams: np.ndarray, is_2d_layer: bool) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
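
The `str()` cast matters because `target_layer` can be a layer object rather than a plain string, and concatenating a module with a string raises a `TypeError`. A quick PyTorch illustration (the layer chosen here is arbitrary, not from the package):

```python
import torch.nn as nn

target_layer = nn.Conv1d(1, 8, kernel_size=3)

# target_layer + " must be a 1D or 2D convolutional layer."   -> TypeError
message = str(target_layer) + " must be a 1D or 2D convolutional layer."
print(message)
```
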
{signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/pytorch_cam_builder.py
@@ -15,8 +15,8 @@ class TorchCamBuilder(CamBuilder):

     def __init__(self, model: nn.Module | Any, transform_fn: Callable[[np.ndarray, *tuple[Any, ...]], torch.Tensor]
                  = None, class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
-                 ignore_channel_dim: bool = False,
-                 use_gpu: bool = False, padding_dim: int = None, seed: int = 11):
+                 ignore_channel_dim: bool = False, is_regression_network: bool = False, model_output_index: int = None,
+                 extend_search: bool = False, use_gpu: bool = False, padding_dim: int = None, seed: int = 11):
         """
         Initializes the TorchCamBuilder class. The constructor also displays, if present and retrievable, the 1D- and
         2D-convolutional layers in the network, as well as the final Sigmoid/Softmax activation. Additionally, the CAM
@@ -36,6 +36,10 @@ class TorchCamBuilder(CamBuilder):
             during model inference, either by the model itself or by the preprocessing function.
         :param ignore_channel_dim: (optional, default is False) A boolean indicating whether to ignore the channel
             dimension. This is useful when the model expects inputs without a singleton channel dimension.
+        :param is_regression_network: (optional, default is False) A boolean indicating whether the network is designed
+            for a regression task. If set to True, the CAM will highlight both positive and negative contributions.
+            While negative contributions are typically irrelevant for classification-based saliency maps, they can be
+            meaningful in regression settings, as they may represent features that decrease the predicted value.
         :param model_output_index: (optional, default is None) An integer index specifying which of the model's outputs
             represents output scores (or probabilities). If there is only one output, this argument can be ignored.
         :param extend_search: (optional, default is False) A boolean flag indicating whether to deepend the search for
@@ -52,6 +56,7 @@ class TorchCamBuilder(CamBuilder):
         super(TorchCamBuilder, self).__init__(model=model, transform_fn=transform_fn, class_names=class_names,
                                               time_axs=time_axs, input_transposed=input_transposed,
                                               ignore_channel_dim=ignore_channel_dim,
+                                              is_regression_network=is_regression_network,
                                               model_output_index=model_output_index, extend_search=extend_search,
                                               padding_dim=padding_dim, seed=seed)

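
With the constructor wired up, a PyTorch regression model can be wrapped directly. A hypothetical usage sketch — the model, class name, and import path are placeholders; if `TorchCamBuilder` is not re-exported from the package root, import it from `signal_grad_cam.pytorch_cam_builder`:

```python
import torch.nn as nn
from signal_grad_cam import TorchCamBuilder  # assumed re-export; adjust if needed

# Placeholder 1D-CNN regressor with a single scalar output
model = nn.Sequential(
    nn.Conv1d(1, 8, kernel_size=5),
    nn.ReLU(),
    nn.AdaptiveAvgPool1d(1),
    nn.Flatten(),
    nn.Linear(8, 1),
)

cam_builder = TorchCamBuilder(model,
                              class_names=["predicted_value"],
                              is_regression_network=True,   # keep negative contributions
                              use_gpu=False)
```
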
@@ -120,7 +125,8 @@ class TorchCamBuilder(CamBuilder):

     def _create_raw_batched_cams(self, data_list: List[np.ndarray | torch.Tensor], target_class: int,
                                  target_layer: nn.Module, explainer_type: str, softmax_final: bool,
-                                 extra_inputs_list: List[Any] = None
+                                 extra_inputs_list: List[Any] = None, eps: float = 1e-6) \
+            -> Tuple[List[np.ndarray], np.ndarray]:
         """
         Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
         and target class). Additionally, it returns the class probabilities predicted by the model.
@@ -136,6 +142,8 @@ class TorchCamBuilder(CamBuilder):
             activation function.
         :param extra_inputs_list: (optional, defaults is None) A list of additional input objects required by the
             model's forward method.
+        :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
+            to avoid null or None results.

         :return:
             - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
@@ -179,30 +187,32 @@ class TorchCamBuilder(CamBuilder):
            outputs = outputs[self.model_output_index]

        if softmax_final:
-            # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
-            # during derivation
-            target_scores = torch.log(outputs)
            target_probs = outputs
-
-
-
-
-
-
-
-
+            if len(outputs.shape) == 2 and outputs.shape[1] > 1:
+                # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
+                # during derivation. Clamp probabilities before log application to avoid null maps for maximum
+                # confidence.
+                target_scores = torch.log(torch.clamp(outputs, min=eps, max=1 - eps))
+            else:
+                # Adjust results for binary networks
+                target_scores = torch.logit(outputs, eps=eps)
+                if len(outputs.shape) == 1:
+                    target_scores = torch.stack([-target_scores, target_scores], dim=1)
+                    target_probs = torch.stack([1 - target_probs, target_probs], dim=1)
+                else:
+                    target_scores = torch.cat([-target_scores, target_scores], dim=1)
+                    target_probs = torch.cat([1 - target_probs, target_probs], dim=1)
        else:
-            if len(outputs.shape) == 1:
-                outputs = torch.stack([-outputs, outputs], dim=1)
-            elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
-                outputs = torch.cat([-outputs, outputs], dim=1)
            target_scores = outputs
-
            if len(outputs.shape) == 2 and outputs.shape[1] > 1:
                target_probs = torch.softmax(target_scores, dim=1)
            else:
-
-
+                if len(outputs.shape) == 1:
+                    target_scores = torch.stack([-outputs, outputs], dim=1)
+                elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
+                    target_scores = torch.cat([-outputs, outputs], dim=1)
+                p = torch.sigmoid(outputs)
+                target_probs = torch.stack([1 - p, p], dim=1)

        target_probs = target_probs[:, target_class].cpu().detach().numpy()

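
The branch added above recovers pre-activation scores differently for multiclass and binary heads. A standalone sketch of the same decision logic on mock probabilities (illustrative only, not the package's method):

```python
import torch

EPS = 1e-6

def recover_scores(outputs: torch.Tensor) -> torch.Tensor:
    # Multiclass Softmax head: invert it up to a constant via log of clamped probabilities
    if outputs.ndim == 2 and outputs.shape[1] > 1:
        return torch.log(torch.clamp(outputs, min=EPS, max=1 - EPS))
    # Binary Sigmoid head: recover the logit and expand to a two-column score matrix
    scores = torch.logit(outputs, eps=EPS)
    if outputs.ndim == 1:
        return torch.stack([-scores, scores], dim=1)
    return torch.cat([-scores, scores], dim=1)

print(recover_scores(torch.tensor([[0.1, 0.7, 0.2]])))  # multiclass: clamped log-probabilities
print(recover_scores(torch.tensor([0.9, 0.2])))         # binary: (batch, 2) signed logits
```
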
@@ -246,7 +256,8 @@ class TorchCamBuilder(CamBuilder):
             activations[i, :] *= weights[i]

         cam = torch.sum(activations, dim=0)
-
+        if not self.is_regression_network:
+            cam = torch.relu(cam)
         return cam

     def _get_hirescam_map(self, is_2d_layer: bool, batch_idx: int) -> torch.Tensor:
@@ -271,7 +282,8 @@ class TorchCamBuilder(CamBuilder):
             activations[i, :] *= gradients[i, :]

         cam = torch.sum(activations, dim=0)
-
+        if not self.is_regression_network:
+            cam = torch.relu(cam)
         return cam

     def __get_activation_forward_hook(self, layer: nn.Module, inputs: Tuple[torch.Tensor, ...], outputs: torch.Tensor) \
{signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam/tensorflow_cam_builder.py
@@ -20,8 +20,8 @@ class TfCamBuilder(CamBuilder):

     def __init__(self, model: tf.keras.Model | Any, transform_fn: Callable[[np.ndarray, *tuple[Any, ...]], tf.Tensor]
                  = None, class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
-                 ignore_channel_dim: bool = False,
-                 padding_dim: int = None, seed: int = 11):
+                 ignore_channel_dim: bool = False, is_regression_network: bool = False, model_output_index: int = None,
+                 extend_search: bool = False, padding_dim: int = None, seed: int = 11):
         """
         Initializes the TfCamBuilder class. The constructor also displays, if present and retrievable, the 1D- and
         2D-convolutional layers in the network, as well as the final Sigmoid/Softmax activation. Additionally, the CAM
@@ -41,6 +41,10 @@ class TfCamBuilder(CamBuilder):
             during model inference, either by the model itself or by the preprocessing function.
         :param ignore_channel_dim: (optional, default is False) A boolean indicating whether to ignore the channel
             dimension. This is useful when the model expects inputs without a singleton channel dimension.
+        :param is_regression_network: (optional, default is False) A boolean indicating whether the network is designed
+            for a regression task. If set to True, the CAM will highlight both positive and negative contributions.
+            While negative contributions are typically irrelevant for classification-based saliency maps, they can be
+            meaningful in regression settings, as they may represent features that decrease the predicted value.
         :param model_output_index: (optional, default is None) An integer index specifying which of the model's outputs
             represents output scores (or probabilities). If there is only one output, this argument can be ignored.
         :param extend_search: (optional, default is False) A boolean flag indicating whether to deepend the search for
@@ -54,7 +58,9 @@ class TfCamBuilder(CamBuilder):
         # Initialize attributes
         super(TfCamBuilder, self).__init__(model=model, transform_fn=transform_fn, class_names=class_names,
                                            time_axs=time_axs, input_transposed=input_transposed,
-                                           ignore_channel_dim=ignore_channel_dim,
+                                           ignore_channel_dim=ignore_channel_dim,
+                                           is_regression_network=is_regression_network,
+                                           model_output_index=model_output_index,
                                            extend_search=extend_search, padding_dim=padding_dim, seed=seed)

         # Set seeds
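
The same regression switch is available on the Keras side. A hypothetical TfCamBuilder usage sketch — the model and import path are placeholders; adjust the import if the class is only exposed via `signal_grad_cam.tensorflow_cam_builder`:

```python
import tensorflow as tf
from signal_grad_cam import TfCamBuilder  # assumed re-export; adjust if needed

# Placeholder Keras 1D-CNN regressor with a single linear output
model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(8, 5, activation="relu", input_shape=(500, 1)),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(1),
])

cam_builder = TfCamBuilder(model,
                           class_names=["predicted_value"],
                           is_regression_network=True)  # keep negative contributions
```
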
@@ -144,7 +150,8 @@ class TfCamBuilder(CamBuilder):

     def _create_raw_batched_cams(self, data_list: List[np.ndarray | tf.Tensor], target_class: int,
                                  target_layer: tf.keras.layers.Layer, explainer_type: str, softmax_final: bool,
-                                 extra_inputs_list: List[Any] = None
+                                 extra_inputs_list: List[Any] = None, eps: float = 1e-6) \
+            -> Tuple[List[np.ndarray], np.ndarray]:
         """
         Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
         and target class). Additionally, it returns the class probabilities predicted by the model.
@@ -160,6 +167,8 @@ class TfCamBuilder(CamBuilder):
             activation function.
         :param extra_inputs_list: (optional, defaults is None) A list of additional input objects required by the
             model's forward method.
+        :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
+            to avoid null or None results.

         :return:
             - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
@@ -193,30 +202,32 @@ class TfCamBuilder(CamBuilder):
        self.activations, outputs = grad_model([data_batch] + extra_inputs_list)

        if softmax_final:
-            # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
-            # during derivation
-            target_scores = tf.math.log(outputs)
            target_probs = outputs
-
-
-
-
-
-
-
-
+            if len(outputs.shape) == 2 and outputs.shape[1] > 1:
+                # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
+                # during derivation. Clamp probabilities before log application to avoid null maps for maximum
+                # confidence.
+                target_scores = tf.math.log(tf.clip_by_value(outputs, eps, 1.0 - eps))
+            else:
+                # Adjust results for binary network
+                target_scores = tf.math.logit(outputs, eps=eps)
+                if len(outputs.shape) == 1:
+                    target_scores = tf.stack([-target_scores, target_scores], axis=1)
+                    target_probs = tf.stack([1 - target_probs, target_probs], axis=1)
+                else:
+                    target_scores = tf.concat([-target_scores, target_scores], axis=1)
+                    target_probs = tf.concat([1 - target_probs, target_probs], axis=1)
        else:
-            if len(outputs.shape) == 1:
-                outputs = tf.stack([-outputs, outputs], axis=1)
-            elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
-                outputs = tf.concat([-outputs, outputs], axis=1)
            target_scores = outputs
-
            if len(outputs.shape) == 2 and outputs.shape[1] > 1:
                target_probs = tf.nn.softmax(target_scores, axis=1)
            else:
-
-
+                if len(outputs.shape) == 1:
+                    target_scores = tf.stack([-outputs, outputs], axis=1)
+                elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
+                    target_scores = tf.concat([-outputs, outputs], axis=1)
+                p = tf.math.sigmoid(outputs)
+                target_probs = tf.stack([1 - p, p], axis=1)

        target_scores = target_scores[:, target_class]
        target_probs = target_probs[:, target_class]
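
A standalone TensorFlow check of the probability clipping used in the multiclass branch above (illustrative, not taken from the package):

```python
import tensorflow as tf

eps = 1e-6
outputs = tf.constant([[0.0, 1.0], [0.3, 0.7]])

raw_scores = tf.math.log(outputs)                                     # log(0) -> -inf, degenerate maps
safe_scores = tf.math.log(tf.clip_by_value(outputs, eps, 1.0 - eps))  # finite for saturated probabilities

print(raw_scores.numpy())
print(safe_scores.numpy())
```
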
@@ -260,7 +271,8 @@ class TfCamBuilder(CamBuilder):
             activations[:, i] *= weights[i]

         cam = tf.reduce_sum(tf.convert_to_tensor(activations), axis=-1)
-
+        if not self.is_regression_network:
+            cam = tf.nn.relu(cam)
         return cam

     def _get_hirecam_map(self, is_2d_layer: bool, batch_idx: int) -> tf.Tensor:
@@ -285,7 +297,8 @@ class TfCamBuilder(CamBuilder):
             activations[:, i] *= gradients[:, i]

         cam = tf.reduce_sum(tf.convert_to_tensor(activations), axis=-1)
-
+        if not self.is_regression_network:
+            cam = tf.nn.relu(cam)
         return cam

     @staticmethod
{signal_grad_cam-0.1.2 → signal_grad_cam-0.1.4}/signal_grad_cam.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: signal-grad-cam
-Version: 0.1.2
+Version: 0.1.4
 Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
 Home-page: https://github.com/samuelepe11/signal_grad_cam
 Author: Samuele Pe