signal-grad-cam 0.1.0__tar.gz → 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of signal-grad-cam might be problematic. Click here for more details.
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/PKG-INFO +1 -1
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/setup.py +1 -1
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam/cam_builder.py +17 -14
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam/pytorch_cam_builder.py +28 -6
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam/tensorflow_cam_builder.py +21 -4
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/PKG-INFO +1 -1
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/LICENSE +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/README.md +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/pyproject.toml +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/setup.cfg +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam/__init__.py +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/SOURCES.txt +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/dependency_links.txt +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/not-zip-safe +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/requires.txt +0 -0
- {signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: signal_grad_cam
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.2
|
|
4
4
|
Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
|
|
5
5
|
Home-page: https://github.com/samuelepe11/signal_grad_cam
|
|
6
6
|
Author: Samuele Pe
|
|
@@ -5,7 +5,7 @@ with open("README.md", "r") as f:
|
|
|
5
5
|
|
|
6
6
|
setup(
|
|
7
7
|
name="signal_grad_cam",
|
|
8
|
-
version="0.1.
|
|
8
|
+
version="0.1.2",
|
|
9
9
|
description="SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability"
|
|
10
10
|
" and efficiency.",
|
|
11
11
|
keywords="XAI, class activation maps, CNN, time series",
|
|
@@ -21,7 +21,7 @@ class CamBuilder:
|
|
|
21
21
|
"HiResCAM": "High-Resolution Class Activation Mapping"}
|
|
22
22
|
|
|
23
23
|
def __init__(self, model: torch.nn.Module | tf.keras.Model | Any,
|
|
24
|
-
transform_fn: Callable[[np.ndarray,
|
|
24
|
+
transform_fn: Callable[[np.ndarray, *tuple[Any, ...]], torch.Tensor | tf.Tensor] = None,
|
|
25
25
|
class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
|
|
26
26
|
ignore_channel_dim: bool = False, model_output_index: int = None, extend_search: bool = False,
|
|
27
27
|
padding_dim: int = None, seed: int = 11):
|
|
@@ -103,7 +103,7 @@ class CamBuilder:
|
|
|
103
103
|
explainer_types: str | List[str], target_layers: str | List[str], softmax_final: bool,
|
|
104
104
|
data_names: List[str] = None, data_sampling_freq: float = None, dt: float = 10,
|
|
105
105
|
channel_names: List[str | float] = None, results_dir_path: str = None, aspect_factor: float = 100,
|
|
106
|
-
data_shape_list: List[Tuple[int, int]] = None, extra_preprocess_inputs_list: List[Any] = None,
|
|
106
|
+
data_shape_list: List[Tuple[int, int]] = None, extra_preprocess_inputs_list: List[List[Any]] = None,
|
|
107
107
|
extra_inputs_list: List[Any] = None, time_names: List[str | float] = None,
|
|
108
108
|
axes_names: Tuple[str | None, str | None] | List[str | None] = None) \
|
|
109
109
|
-> Tuple[Dict[str, List[np.ndarray]], Dict[str, np.ndarray], Dict[str, Tuple[np.ndarray, np.ndarray]]]:
|
|
@@ -139,9 +139,9 @@ class CamBuilder:
|
|
|
139
139
|
one-dimensional CAM.
|
|
140
140
|
:param data_shape_list: (optional, default is None) A list of integer tuples storing the original input sizes,
|
|
141
141
|
used to set the CAM shape after resizing during preprocessing. The expected format is number of rows x
|
|
142
|
-
|
|
143
|
-
:param extra_preprocess_inputs_list: (optional, default is None) A list of
|
|
144
|
-
the preprocessing method.
|
|
142
|
+
number of columns.
|
|
143
|
+
:param extra_preprocess_inputs_list: (optional, default is None) A list of lists, where the i-th sub-list
|
|
144
|
+
represents the additional input objects required by the preprocessing method for the i-th input.
|
|
145
145
|
:param extra_inputs_list: (optional, default is None) A list of additional input objects required by the model's
|
|
146
146
|
forward method.
|
|
147
147
|
:param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
|
|
@@ -448,9 +448,9 @@ class CamBuilder:
|
|
|
448
448
|
txt = " - " + addon + f"{name}:\t{type(layer).__name__}"
|
|
449
449
|
print(txt)
|
|
450
450
|
|
|
451
|
-
def _create_raw_batched_cams(self, data_list: List[np.ndarray], target_class: int,
|
|
452
|
-
explainer_type: str, softmax_final: bool,
|
|
453
|
-
|
|
451
|
+
def _create_raw_batched_cams(self, data_list: List[np.ndarray | torch.Tensor | tf.Tensor], target_class: int,
|
|
452
|
+
target_layer: str, explainer_type: str, softmax_final: bool,
|
|
453
|
+
extra_inputs_list: List[Any] = None) -> Tuple[List[np.ndarray], np.ndarray]:
|
|
454
454
|
"""
|
|
455
455
|
Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
|
|
456
456
|
and target class). Additionally, it returns the class probabilities predicted by the model.
|
|
@@ -511,7 +511,7 @@ class CamBuilder:
|
|
|
511
511
|
|
|
512
512
|
def __create_batched_cams(self, data_list: List[np.ndarray], target_class: int, target_layer: str,
|
|
513
513
|
explainer_type: str, softmax_final: bool, data_shape_list: List[Tuple[int, int]] = None,
|
|
514
|
-
extra_preprocess_inputs_list: List[Any] = None, extra_inputs_list: List[Any] = None) \
|
|
514
|
+
extra_preprocess_inputs_list: List[List[Any]] = None, extra_inputs_list: List[Any] = None) \
|
|
515
515
|
-> Tuple[List[np.ndarray], np.ndarray, Tuple[np.ndarray, np.ndarray]]:
|
|
516
516
|
"""
|
|
517
517
|
Prepares the input data list and retrieves CAMs based on the specified settings (defined by algorithm, target
|
|
@@ -530,11 +530,11 @@ class CamBuilder:
|
|
|
530
530
|
:param data_shape_list: (optional, default is None) A list of integer tuples storing the original input sizes,
|
|
531
531
|
used to set the CAM shape after resizing during preprocessing. The expected format is number of rows x
|
|
532
532
|
number of columns.
|
|
533
|
-
:param extra_preprocess_inputs_list: (optional, default is None) A list of
|
|
534
|
-
the preprocessing method.
|
|
533
|
+
:param extra_preprocess_inputs_list: (optional, default is None) A list of lists, where the i-th sub-list
|
|
534
|
+
represents the additional input objects required by the preprocessing method for the i-th input.
|
|
535
535
|
:param extra_inputs_list: (optional, default is None) A list of additional input objects required by the model's
|
|
536
536
|
forward method.
|
|
537
|
-
|
|
537
|
+
0
|
|
538
538
|
:return:
|
|
539
539
|
- cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
|
|
540
540
|
given setting (defined by algorithm, target layer, and target class).
|
|
@@ -552,8 +552,11 @@ class CamBuilder:
|
|
|
552
552
|
if data_shape_list is None:
|
|
553
553
|
data_shape_list = [data_element.shape for data_element in data_list]
|
|
554
554
|
if self.transform_fn is not None:
|
|
555
|
-
extra_preprocess_inputs_list
|
|
556
|
-
|
|
555
|
+
if extra_preprocess_inputs_list is not None:
|
|
556
|
+
data_list = [self.transform_fn(data_element, *extra_preprocess_inputs_list[i]) for i, data_element in
|
|
557
|
+
enumerate(data_list)]
|
|
558
|
+
else:
|
|
559
|
+
data_list = [self.transform_fn(data_element) for data_element in data_list]
|
|
557
560
|
|
|
558
561
|
# Ensure data have consistent size for batching
|
|
559
562
|
if len(data_list) > 1 and self.padding_dim is None:
|
|
@@ -13,7 +13,7 @@ class TorchCamBuilder(CamBuilder):
|
|
|
13
13
|
Represents a PyTorch Class Activation Map (CAM) builder, supporting multiple methods such as Grad-CAM and HiResCAM.
|
|
14
14
|
"""
|
|
15
15
|
|
|
16
|
-
def __init__(self, model: nn.Module | Any, transform_fn: Callable[[np.ndarray,
|
|
16
|
+
def __init__(self, model: nn.Module | Any, transform_fn: Callable[[np.ndarray, *tuple[Any, ...]], torch.Tensor]
|
|
17
17
|
= None, class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
|
|
18
18
|
ignore_channel_dim: bool = False, model_output_index: int = None, extend_search: bool = False,
|
|
19
19
|
use_gpu: bool = False, padding_dim: int = None, seed: int = 11):
|
|
@@ -68,7 +68,8 @@ class TorchCamBuilder(CamBuilder):
|
|
|
68
68
|
else:
|
|
69
69
|
print("Your PyTorch model has no 'eval' method. Please verify that the networks has been set to "
|
|
70
70
|
"evaluation mode before the TorchCamBuilder initialization.")
|
|
71
|
-
self.use_gpu = use_gpu
|
|
71
|
+
self.use_gpu = use_gpu and torch.cuda.is_available()
|
|
72
|
+
self.device = "cuda" if self.use_gpu else "cpu"
|
|
72
73
|
|
|
73
74
|
# Assign the default transform function
|
|
74
75
|
if transform_fn is None:
|
|
@@ -117,9 +118,9 @@ class TorchCamBuilder(CamBuilder):
|
|
|
117
118
|
isinstance(layer, nn.Softmax) or isinstance(layer, nn.Sigmoid)):
|
|
118
119
|
super()._show_layer(name, layer, potential=potential)
|
|
119
120
|
|
|
120
|
-
def _create_raw_batched_cams(self, data_list: List[np.
|
|
121
|
-
explainer_type: str, softmax_final: bool,
|
|
122
|
-
|
|
121
|
+
def _create_raw_batched_cams(self, data_list: List[np.ndarray | torch.Tensor], target_class: int,
|
|
122
|
+
target_layer: nn.Module, explainer_type: str, softmax_final: bool,
|
|
123
|
+
extra_inputs_list: List[Any] = None) -> Tuple[List[np.ndarray], np.ndarray]:
|
|
123
124
|
"""
|
|
124
125
|
Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
|
|
125
126
|
and target class). Additionally, it returns the class probabilities predicted by the model.
|
|
@@ -168,6 +169,10 @@ class TorchCamBuilder(CamBuilder):
|
|
|
168
169
|
data_list = [x.unsqueeze(0) for x in data_list]
|
|
169
170
|
data_batch = torch.stack(data_list)
|
|
170
171
|
|
|
172
|
+
# Set device
|
|
173
|
+
self.model = self.model.to(self.device)
|
|
174
|
+
data_batch = data_batch.to(self.device)
|
|
175
|
+
|
|
171
176
|
extra_inputs_list = extra_inputs_list or []
|
|
172
177
|
outputs = self.model(data_batch, *extra_inputs_list)
|
|
173
178
|
if isinstance(outputs, tuple):
|
|
@@ -178,9 +183,26 @@ class TorchCamBuilder(CamBuilder):
|
|
|
178
183
|
# during derivation
|
|
179
184
|
target_scores = torch.log(outputs)
|
|
180
185
|
target_probs = outputs
|
|
186
|
+
|
|
187
|
+
# Adjust results for binary network
|
|
188
|
+
if len(outputs.shape) == 1:
|
|
189
|
+
target_scores = torch.stack([-target_scores, target_scores], dim=1)
|
|
190
|
+
target_probs = torch.stack([1 - target_probs, target_probs], dim=1)
|
|
191
|
+
elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
|
|
192
|
+
target_scores = torch.cat([-target_scores, target_scores], dim=1)
|
|
193
|
+
target_probs = torch.cat([1 - target_probs, target_probs], dim=1)
|
|
181
194
|
else:
|
|
195
|
+
if len(outputs.shape) == 1:
|
|
196
|
+
outputs = torch.stack([-outputs, outputs], dim=1)
|
|
197
|
+
elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
|
|
198
|
+
outputs = torch.cat([-outputs, outputs], dim=1)
|
|
182
199
|
target_scores = outputs
|
|
183
|
-
|
|
200
|
+
|
|
201
|
+
if len(outputs.shape) == 2 and outputs.shape[1] > 1:
|
|
202
|
+
target_probs = torch.softmax(target_scores, dim=1)
|
|
203
|
+
else:
|
|
204
|
+
tmp = torch.sigmoid(target_scores[:, 1])
|
|
205
|
+
target_probs = torch.stack([1 - tmp, tmp], dim=1)
|
|
184
206
|
|
|
185
207
|
target_probs = target_probs[:, target_class].cpu().detach().numpy()
|
|
186
208
|
|
|
@@ -18,7 +18,7 @@ class TfCamBuilder(CamBuilder):
|
|
|
18
18
|
HiResCAM.
|
|
19
19
|
"""
|
|
20
20
|
|
|
21
|
-
def __init__(self, model: tf.keras.Model | Any, transform_fn: Callable[[np.ndarray,
|
|
21
|
+
def __init__(self, model: tf.keras.Model | Any, transform_fn: Callable[[np.ndarray, *tuple[Any, ...]], tf.Tensor]
|
|
22
22
|
= None, class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
|
|
23
23
|
ignore_channel_dim: bool = False, model_output_index: int = None, extend_search: bool = False,
|
|
24
24
|
padding_dim: int = None, seed: int = 11):
|
|
@@ -142,7 +142,7 @@ class TfCamBuilder(CamBuilder):
|
|
|
142
142
|
isinstance(layer, keras.layers.Softmax) or isinstance(layer, keras.Sequential)):
|
|
143
143
|
super()._show_layer(name, layer, potential=potential)
|
|
144
144
|
|
|
145
|
-
def _create_raw_batched_cams(self, data_list: List[np.
|
|
145
|
+
def _create_raw_batched_cams(self, data_list: List[np.ndarray | tf.Tensor], target_class: int,
|
|
146
146
|
target_layer: tf.keras.layers.Layer, explainer_type: str, softmax_final: bool,
|
|
147
147
|
extra_inputs_list: List[Any] = None) -> Tuple[List[np.ndarray], np.ndarray]:
|
|
148
148
|
"""
|
|
@@ -189,7 +189,7 @@ class TfCamBuilder(CamBuilder):
|
|
|
189
189
|
|
|
190
190
|
grad_model = keras.models.Model(self.model.inputs, [target_layer.output, self.model.output])
|
|
191
191
|
extra_inputs_list = extra_inputs_list or []
|
|
192
|
-
with tf.GradientTape() as tape:
|
|
192
|
+
with (tf.GradientTape() as tape):
|
|
193
193
|
self.activations, outputs = grad_model([data_batch] + extra_inputs_list)
|
|
194
194
|
|
|
195
195
|
if softmax_final:
|
|
@@ -197,9 +197,26 @@ class TfCamBuilder(CamBuilder):
|
|
|
197
197
|
# during derivation
|
|
198
198
|
target_scores = tf.math.log(outputs)
|
|
199
199
|
target_probs = outputs
|
|
200
|
+
|
|
201
|
+
# Adjust results for binary network
|
|
202
|
+
if len(outputs.shape) == 1:
|
|
203
|
+
target_scores = tf.stack([-target_scores, target_scores], axis=1)
|
|
204
|
+
target_probs = tf.stack([1 - target_probs, target_probs], axis=1)
|
|
205
|
+
elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
|
|
206
|
+
target_scores = tf.concat([-target_scores, target_scores], axis=1)
|
|
207
|
+
target_probs = tf.concat([1 - target_probs, target_probs], axis=1)
|
|
200
208
|
else:
|
|
209
|
+
if len(outputs.shape) == 1:
|
|
210
|
+
outputs = tf.stack([-outputs, outputs], axis=1)
|
|
211
|
+
elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
|
|
212
|
+
outputs = tf.concat([-outputs, outputs], axis=1)
|
|
201
213
|
target_scores = outputs
|
|
202
|
-
|
|
214
|
+
|
|
215
|
+
if len(outputs.shape) == 2 and outputs.shape[1] > 1:
|
|
216
|
+
target_probs = tf.nn.softmax(target_scores, axis=1)
|
|
217
|
+
else:
|
|
218
|
+
tmp = tf.math.sigmoid(target_scores[:, 1])
|
|
219
|
+
target_probs = tf.stack([1 - tmp, tmp], axis=1)
|
|
203
220
|
|
|
204
221
|
target_scores = target_scores[:, target_class]
|
|
205
222
|
target_probs = target_probs[:, target_class]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: signal-grad-cam
|
|
3
|
-
Version: 0.1.
|
|
3
|
+
Version: 0.1.2
|
|
4
4
|
Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
|
|
5
5
|
Home-page: https://github.com/samuelepe11/signal_grad_cam
|
|
6
6
|
Author: Samuele Pe
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{signal_grad_cam-0.1.0 → signal_grad_cam-0.1.2}/signal_grad_cam.egg-info/dependency_links.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|