signal-grad-cam 1.0.1__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of signal-grad-cam might be problematic.

--- a/signal_grad_cam/pytorch_cam_builder.py
+++ b/signal_grad_cam/pytorch_cam_builder.py
@@ -119,7 +119,7 @@ class TorchCamBuilder(CamBuilder):
             a layer (i.e., a generic instance attribute, not guaranteed to be a layer).
         """
 
-        if (isinstance(layer, nn.Conv1d) or isinstance(layer, nn.Conv2d) or
+        if (isinstance(layer, nn.Conv1d) or isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Conv3d) or
                 isinstance(layer, nn.Softmax) or isinstance(layer, nn.Sigmoid)):
             super()._show_layer(name, layer, potential=potential)
 
@@ -132,7 +132,8 @@ class TorchCamBuilder(CamBuilder):
         Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
         and target class). Additionally, it returns the class probabilities predicted by the model.
 
-        :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
+        :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal, an image, or
+            a video/volume.
         :param target_class: (mandatory) An integer representing the target class for the explanation.
         :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
             identify either PyTorch named modules or it should be a class dictionary key, used to retrieve the layer
@@ -176,6 +177,7 @@ class TorchCamBuilder(CamBuilder):
             data_list = padded_data_list
 
         is_2d_layer = self._is_2d_layer(target_layer)
+        is_3d_layer = is_2d_layer is None
         if not self.ignore_channel_dim and (is_2d_layer and len(data_list[0].shape) == 2 or not is_2d_layer
                                             and len(data_list[0].shape) == 1):
             data_list = [x.unsqueeze(0) for x in data_list]
@@ -233,35 +235,41 @@ class TorchCamBuilder(CamBuilder):
             target_score.backward(retain_graph=True)
 
             if explainer_type == "HiResCAM":
-                cam = self._get_hirescam_map(is_2d_layer=is_2d_layer, batch_idx=i)
+                cam = self._get_hirescam_map(is_2d_layer=is_2d_layer, is_3d_layer=is_3d_layer, batch_idx=i)
             else:
-                cam = self._get_gradcam_map(is_2d_layer=is_2d_layer, batch_idx=i)
+                cam = self._get_gradcam_map(is_2d_layer=is_2d_layer, is_3d_layer=is_3d_layer, batch_idx=i)
             cam_list.append(cam.cpu().detach().numpy())
 
         return cam_list, target_probs
 
-    def _get_gradcam_map(self, is_2d_layer: bool, batch_idx: int) -> torch.Tensor:
+    def _get_gradcam_map(self, is_2d_layer: bool, is_3d_layer: bool, batch_idx: int) -> torch.Tensor:
         """
         Compute the CAM using the vanilla Gradient-weighted Class Activation Mapping (Grad-CAM) algorithm.
 
         :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
+        :param is_3d_layer: (mandatory) A boolean indicating whether the target layer is a 3D-convolutional layer.
         :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
             list.
 
-        :return: cam: A PyTorch tensor representing the Class Activation Map (CAM) for the batch_idx-th input, built
-            with the Grad-CAM algorithm.
+        :return:
+            - cam: A PyTorch tensor representing the Class Activation Map (CAM) for the batch_idx-th input, built with
+            the Grad-CAM algorithm.
         """
 
-        if is_2d_layer:
+        if is_2d_layer is not None and is_2d_layer:
             dim_mean = (1, 2)
+        elif is_3d_layer:
+            dim_mean = (1, 2, 3)
         else:
             dim_mean = 1
         weights = torch.mean(self.gradients[batch_idx], dim=dim_mean)
         activations = self.activations[batch_idx].clone()
 
         for i in range(self.activations.shape[1]):
-            if is_2d_layer:
+            if is_2d_layer is not None and is_2d_layer:
                 activations[i, :, :] *= weights[i]
+            elif is_3d_layer:
+                activations[i, :, :, :] *= weights[i]
             else:
                 activations[i, :] *= weights[i]
 
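The hunk above reduces the per-channel gradients over every non-channel axis and then channel-weights the activations, exactly as vanilla Grad-CAM prescribes, with `(1, 2, 3)` now covering the depth/time, height, and width axes of a Conv3d feature map. A minimal standalone sketch of that weighting, with random tensors standing in for the hooked `self.activations`/`self.gradients` (the shapes are illustrative assumptions):

```python
import torch

# Hypothetical hooked tensors for one batch item of a Conv3d target layer:
# (channels, depth/time, height, width)
activations = torch.rand(8, 4, 16, 16)
gradients = torch.rand(8, 4, 16, 16)

# Grad-CAM: one scalar weight per channel, averaged over all non-channel axes
weights = torch.mean(gradients, dim=(1, 2, 3))  # shape (8,)
cam = torch.relu((activations * weights[:, None, None, None]).sum(dim=0))
print(cam.shape)  # torch.Size([4, 16, 16]) -> one importance value per voxel/frame pixel
```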
@@ -270,24 +278,28 @@ class TorchCamBuilder(CamBuilder):
             cam = torch.relu(cam)
         return cam
 
-    def _get_hirescam_map(self, is_2d_layer: bool, batch_idx: int) -> torch.Tensor:
+    def _get_hirescam_map(self, is_2d_layer: bool, is_3d_layer: bool, batch_idx: int) -> torch.Tensor:
         """
         Compute the CAM using the High-Resolution Class Activation Mapping (HiResCAM) algorithm.
 
         :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
+        :param is_3d_layer: (mandatory) A boolean indicating whether the target layer is a 3D-convolutional layer.
         :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
             list.
 
-        :return: cam: A PyTorch tensor representing the Class Activation Map (CAM) for the batch_idx-th input, built
-            with the HiResCAM algorithm.
+        :return:
+            - cam: A PyTorch tensor representing the Class Activation Map (CAM) for the batch_idx-th input, built with
+            the HiResCAM algorithm.
         """
 
         activations = self.activations[batch_idx].clone()
         gradients = self.gradients[batch_idx]
 
         for i in range(self.activations.shape[1]):
-            if is_2d_layer:
+            if is_2d_layer is not None and is_2d_layer:
                 activations[i, :, :] *= gradients[i, :, :]
+            elif is_3d_layer:
+                activations[i, :, :, :] *= gradients[i, :, :, :]
             else:
                 activations[i, :] *= gradients[i, :]
 
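HiResCAM, by contrast, skips the per-channel averaging and multiplies gradients into activations element-wise before the channel sum, which is what the added 3D branch does with `gradients[i, :, :, :]`. Under the same hypothetical tensors as the previous sketch:

```python
import torch

activations = torch.rand(8, 4, 16, 16)  # hypothetical Conv3d activations (C, T, H, W)
gradients = torch.rand(8, 4, 16, 16)

# HiResCAM: element-wise product, no spatial averaging of the gradients
cam = torch.relu((activations * gradients).sum(dim=0))
print(cam.shape)  # torch.Size([4, 16, 16])
```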
@@ -331,20 +343,23 @@ class TorchCamBuilder(CamBuilder):
         self.gradients = gradients
 
     @staticmethod
-    def _is_2d_layer(target_layer: nn.Module) -> bool:
+    def _is_2d_layer(target_layer: nn.Module) -> bool | None:
         """
-        Evaluates whether the target layer is a 2D-convolutional layer.
+        Evaluates whether the target layer is at least a 2D-convolutional layer.
 
         :param target_layer: (mandatory) A PyTorch module.
 
         :return:
-            - is_2d_layer: A boolean indicating whether the target layer is a 2D-convolutional layer.
+            - is_2d_layer: A boolean indicating whether the target layer is a 2D-convolutional layer. If the target
+            layer is a 3D-convolutional layer, the function returns None.
         """
 
         if isinstance(target_layer, nn.Conv1d):
             is_2d_layer = False
         elif isinstance(target_layer, nn.Conv2d):
             is_2d_layer = True
+        elif isinstance(target_layer, nn.Conv3d):
+            is_2d_layer = None
         else:
             is_2d_layer = CamBuilder._is_2d_layer(target_layer)
         return is_2d_layer
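The `bool | None` return type makes `_is_2d_layer` a tri-state flag: False for 1D, True for 2D, None for 3D, which is why the callers test `is_2d_layer is not None and is_2d_layer` and derive `is_3d_layer = is_2d_layer is None`. A minimal sketch of the dispatch pattern (a standalone reimplementation for illustration, not the package's code):

```python
import torch.nn as nn

def is_2d_layer(layer: nn.Module) -> bool | None:
    # Tri-state flag mirroring the diff: False -> 1D, True -> 2D, None -> 3D
    if isinstance(layer, nn.Conv1d):
        return False
    if isinstance(layer, nn.Conv2d):
        return True
    if isinstance(layer, nn.Conv3d):
        return None
    raise TypeError("unsupported layer")

for layer in (nn.Conv1d(3, 8, 3), nn.Conv2d(3, 8, 3), nn.Conv3d(3, 8, 3)):
    flag = is_2d_layer(layer)
    is_3d = flag is None  # same trick as `is_3d_layer = is_2d_layer is None`
    dim_mean = (1, 2, 3) if is_3d else ((1, 2) if flag else 1)  # axes averaged by Grad-CAM
    print(type(layer).__name__, flag, dim_mean)
```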
--- a/signal_grad_cam/tensorflow_cam_builder.py
+++ b/signal_grad_cam/tensorflow_cam_builder.py
@@ -68,13 +68,19 @@ class TfCamBuilder(CamBuilder):
 
         # Check for input/output attributes
         if not hasattr(model, "inputs"):
-            print("Your TensorFlow/Keras model has no attribute 'inputs'. Ensure it is built or loaded correctly, or\n"
-                  "provide a different one. If the model contains a 'Sequential' attribute, that Sequential object may\n"
-                  "be a suitable candidate for an input model.")
+            self._CamBuilder__print_justify("Your TensorFlow/Keras model has no attribute 'inputs'. Ensure it is built "
+                                            "or loaded correctly, or provide a different one. If the model contains a "
+                                            "'Sequential' attribute, that Sequential object may be a suitable candidate"
+                                            " for an input model.")
         elif not hasattr(model, "output"):
-            print("Your TensorFlow/Keras model has no attribute 'output'. Ensure it is built or loaded correctly, or\n"
-                  "provide a different one. If the model contains a 'Sequential' attribute, that Sequential object may\n"
-                  "be a suitable candidate for an input model.")
+            if hasattr(model, "outputs"):
+                self.model.output = model.outputs[self.model_output_index] if self.model_output_index is not None \
+                    else model.outputs[0]
+            else:
+                self._CamBuilder__print_justify("Your TensorFlow/Keras model has no attribute 'output' or 'outputs'. "
+                                                "Ensure it is built or loaded correctly, or provide a different one. If"
+                                                " the model contains a 'Sequential' attribute, that Sequential object "
+                                                "may be a suitable candidate for an input model.")
 
     def _get_layers_pool(self, show: bool = False, extend_search: bool = False) \
             -> Dict[str, tf.keras.layers.Layer | Any]:
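The new `elif` branch salvages multi-output models: when `model.output` is absent but `model.outputs` exists, it picks the head indicated by `self.model_output_index`, defaulting to the first. A sketch of just that selection logic, on a hypothetical two-headed functional model:

```python
import tensorflow as tf

inp = tf.keras.Input(shape=(16,))
hidden = tf.keras.layers.Dense(8, activation="relu")(inp)
model = tf.keras.Model(inputs=inp,
                       outputs=[tf.keras.layers.Dense(3)(hidden),   # head 0
                                tf.keras.layers.Dense(1)(hidden)])  # head 1

model_output_index = 1  # hypothetical user-supplied head index
output = model.outputs[model_output_index] if model_output_index is not None else model.outputs[0]
print(output.shape)  # (None, 1)
```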
@@ -145,7 +151,8 @@ class TfCamBuilder(CamBuilder):
         """
 
         if (isinstance(layer, keras.layers.Conv1D) or isinstance(layer, keras.layers.Conv2D) or
-                isinstance(layer, keras.layers.Softmax) or isinstance(layer, keras.Sequential)):
+                isinstance(layer, keras.layers.Conv3D) or isinstance(layer, keras.layers.Softmax) or
+                isinstance(layer, keras.Sequential)):
             super()._show_layer(name, layer, potential=potential)
 
     def _create_raw_batched_cams(self, data_list: List[np.ndarray | tf.Tensor], target_class: int,
@@ -157,7 +164,8 @@ class TfCamBuilder(CamBuilder):
         Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
         and target class). Additionally, it returns the class probabilities predicted by the model.
 
-        :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
+        :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal, an image, or
+            a video/volume.
         :param target_class: (mandatory) An integer representing the target class for the explanation.
         :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
             identify either TensorFlow/Keras layers or it should be a class dictionary key, used to retrieve the layer
@@ -247,64 +255,74 @@ class TfCamBuilder(CamBuilder):
 
         cam_list = []
         is_2d_layer = self._is_2d_layer(target_layer)
+        is_3d_layer = is_2d_layer is None
         for i in range(len(data_list)):
             if explainer_type == "HiResCAM":
-                cam = self._get_hirecam_map(is_2d_layer=is_2d_layer, batch_idx=i)
+                cam = self._get_hirecam_map(is_2d_layer=is_2d_layer, is_3d_layer=is_3d_layer, batch_idx=i)
             else:
-                cam = self._get_gradcam_map(is_2d_layer=is_2d_layer, batch_idx=i)
+                cam = self._get_gradcam_map(is_2d_layer=is_2d_layer, is_3d_layer=is_3d_layer, batch_idx=i)
             cam_list.append(cam.numpy())
 
         return cam_list, target_probs
 
-    def _get_gradcam_map(self, is_2d_layer: bool, batch_idx: int) -> tf.Tensor:
+    def _get_gradcam_map(self, is_2d_layer: bool, is_3d_layer: bool, batch_idx: int) -> tf.Tensor:
         """
         Compute the CAM using the vanilla Gradient-weighted Class Activation Mapping (Grad-CAM) algorithm.
 
         :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
+        :param is_3d_layer: (mandatory) A boolean indicating whether the target layer is a 3D-convolutional layer.
         :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
             list.
 
-        :return: cam: A TensorFlow/Keras tensor representing the Class Activation Map (CAM) for the batch_idx-th input,
-            built with the Grad-CAM algorithm.
+        :return:
+            - cam: A TensorFlow/Keras tensor representing the Class Activation Map (CAM) for the batch_idx-th input,
+            built with the Grad-CAM algorithm.
         """
 
-        if is_2d_layer:
+        if is_2d_layer is not None and is_2d_layer:
             dim_mean = (0, 1)
+        elif is_3d_layer:
+            dim_mean = (0, 1, 2)
         else:
             dim_mean = 0
         weights = tf.reduce_mean(self.gradients[batch_idx], axis=dim_mean)
         activations = self.activations[batch_idx].numpy()
 
         for i in range(activations.shape[-1]):
-            if is_2d_layer:
+            if is_2d_layer is not None and is_2d_layer:
                 activations[:, :, i] *= weights[i]
+            elif is_3d_layer:
+                activations[:, :, :, i] *= weights[i]
             else:
                 activations[:, i] *= weights[i]
-                activations[:, i] *= weights[i]
 
         cam = tf.reduce_sum(tf.convert_to_tensor(activations), axis=-1)
         if not self.is_regression_network:
             cam = tf.nn.relu(cam)
         return cam
 
-    def _get_hirecam_map(self, is_2d_layer: bool, batch_idx: int) -> tf.Tensor:
+    def _get_hirecam_map(self, is_2d_layer: bool, is_3d_layer: bool, batch_idx: int) -> tf.Tensor:
         """
         Compute the CAM using the High-Resolution Class Activation Mapping (HiResCAM) algorithm.
 
         :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
+        :param is_3d_layer: (mandatory) A boolean indicating whether the target layer is a 3D-convolutional layer.
         :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
             list.
 
-        :return: cam: A TensorFlow/Keras tensor representing the Class Activation Map (CAM) for the batch_idx-th input,
-            built with the HiResCAM algorithm.
+        :return:
+            - cam: A TensorFlow/Keras tensor representing the Class Activation Map (CAM) for the batch_idx-th input,
+            built with the HiResCAM algorithm.
         """
 
         activations = self.activations[batch_idx].numpy()
         gradients = self.gradients[batch_idx].numpy()
 
         for i in range(activations.shape[-1]):
-            if is_2d_layer:
+            if is_2d_layer is not None and is_2d_layer:
                 activations[:, :, i] *= gradients[:, :, i]
+            elif is_3d_layer:
+                activations[:, :, :, i] *= gradients[:, :, :, i]
             else:
                 activations[:, i] *= gradients[:, i]
 
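Note the layout difference from the PyTorch path: TensorFlow feature maps are channels-last, so the channel axis is -1, gradients are averaged over the leading spatio-temporal axes, and the final sum runs over axis -1. The same Grad-CAM sketch as before, transposed to this convention (shapes again illustrative):

```python
import tensorflow as tf

# Hypothetical hooked tensors for one item of a Conv3D target layer, channels-last: (T, H, W, C)
activations = tf.random.uniform((4, 16, 16, 8))
gradients = tf.random.uniform((4, 16, 16, 8))

weights = tf.reduce_mean(gradients, axis=(0, 1, 2))  # one weight per channel
cam = tf.nn.relu(tf.reduce_sum(activations * weights, axis=-1))
print(cam.shape)  # (4, 16, 16)
```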
@@ -314,20 +332,23 @@ class TfCamBuilder(CamBuilder):
         return cam
 
     @staticmethod
-    def _is_2d_layer(target_layer: tf.keras.layers.Layer) -> bool:
+    def _is_2d_layer(target_layer: tf.keras.layers.Layer) -> bool | None:
         """
         Evaluates whether the target layer is a 2D-convolutional layer.
 
         :param target_layer: (mandatory) A TensorFlow/Keras layer.
 
         :return:
-            - is_2d_layer: A boolean indicating whether the target layer is a 2D-convolutional layer.
+            - is_2d_layer: A boolean indicating whether the target layer is a 2D-convolutional layer. If the target
+            layer is a 3D-convolutional layer, the function returns None.
         """
 
         if isinstance(target_layer, keras.layers.Conv1D):
             is_2d_layer = False
         elif isinstance(target_layer, keras.layers.Conv2D):
             is_2d_layer = True
+        elif isinstance(target_layer, keras.layers.Conv3D):
+            is_2d_layer = None
         else:
             is_2d_layer = CamBuilder._is_2d_layer(target_layer)
         return is_2d_layer
--- a/signal_grad_cam-1.0.1.dist-info/METADATA
+++ b/signal_grad_cam-2.0.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
-Name: signal-grad-cam
-Version: 1.0.1
+Name: signal_grad_cam
+Version: 2.0.0
 Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
 Home-page: https://github.com/samuelepe11/signal_grad_cam
 Author: Samuele Pe
@@ -19,6 +19,7 @@ Requires-Dist: opencv-python
 Requires-Dist: torch
 Requires-Dist: keras
 Requires-Dist: tensorflow
+Requires-Dist: imageio
 
 <div id="top"></div>
 
@@ -31,7 +32,7 @@ Requires-Dist: tensorflow
     SignalGrad-CAM
   </h1>
 
-  <h3 align="center">SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.</h3>
+  <h3 align="center">SignalGrad-CAM aims at generalising Grad-CAM to time-based applications, while enhancing usability and efficiency.</h3>
 
   <p align="center">
     <a href="https://github.com/bmi-labmedinfo/signal_grad_cam"><strong>Explore the docs</strong></a>
@@ -61,9 +62,9 @@ Requires-Dist: tensorflow
 <!-- ABOUT THE PROJECT -->
 ## About The Project
 
-<p align="justify">Deep learning models have demonstrated remarkable performance across various domains; however, their black-box nature hinders interpretability and trust. As a result, the demand for explanation algorithms has grown, driving advancements in the field of eXplainable AI (XAI). However, relatively few efforts have been dedicated to developing interpretability methods for signal-based models. We introduce SignalGrad-CAM (SGrad-CAM), a versatile and efficient interpretability tool that extends the principles of Grad-CAM to both 1D- and 2D-convolutional neural networks for signal processing. SGrad-CAM is designed to interpret models for either image or signal elaboration, supporting both PyTorch and TensorFlow/Keras frameworks, and provides diagnostic and visualization tools to enhance model transparency. The package is also designed for batch processing, ensuring efficiency even for large-scale applications, while maintaining a simple and user-friendly structure.</p>
+<p align="justify">Deep learning models have achieved remarkable performance across many domains, yet their black-box nature often limits interpretability and trust. This has fueled the development of explanation algorithms within the field of eXplainable AI (XAI). Despite this progress, relatively few methods target time-based convolutional neural networks (CNNs), such as 1D-CNNs for signals and 3D-CNNs for videos. We present SignalGrad-CAM (SGrad-CAM), a versatile and efficient interpretability tool that extends the principles of Grad-CAM to 1D, 2D, and 3D CNNs. SGrad-CAM supports model interpretation for signals, images, and video/volume data in both PyTorch and TensorFlow/Keras frameworks. It includes diagnostic and visualization tools to enhance transparency, and its batch-processing design ensures scalability for large datasets while maintaining a simple, user-friendly structure.</p>
 
-<p align="justify"><i><b>Keywords:</b> eXplainable AI, explanations, local explanation, fidelity, interpretability, transparency, trustworthy AI, feature importance, saliency maps, CAM, Grad-CAM, black-box, deep learning, CNN, signals, time series</i></p>
+<p align="justify"><i><b>Keywords:</b> eXplainable AI, XAI, explanations, local explanation, contrastive explanations, cXAI, fidelity, interpretability, transparency, trustworthy AI, feature importance, saliency maps, CAM, Grad-CAM, HiResCAM, black-box, deep learning, CNN, 1D-CNN, 2D-CNN, 3D-CNN, signals, time series, images, videos, volumes.</i></p>
 
 <p align="right"><a href="#top">Back To Top</a></p>
 
@@ -83,15 +84,13 @@ Requires-Dist: tensorflow
 
 <!-- USAGE EXAMPLES -->
 ## Usage
-<p align="justify">
-Here's a basic example that illustrates SignalGrad-CAM common usage.
+<p align="justify">Here's a basic example that illustrates SignalGrad-CAM's common usage.</p>
 
-First, train a classifier on the data or select an already trained model, then instantiate `TorchCamBuilder` (if you are working with a PyTorch model) or `TfCamBuilder` (if the model is built in TensorFlow/Keras).
+<p align="justify">First, train a CNN on the data or load a pre-trained model, then instantiate `TorchCamBuilder` (if you are working with a PyTorch model) or `TfCamBuilder` (if the model is built in TensorFlow/Keras).</p>
 
-Besides the model, `TorchCamBuilder` requires additional information to function effectively. For example, you may provide a list of class labels, a preprocessing function, or an index indicating which dimension corresponds to time. These attributes allow SignalGrad-CAM to be applied to a wide range of models.
+<p align="justify">Besides the model, `TorchCamBuilder` requires additional information to function effectively. For example, you may provide a list of class labels, a pre-processing function, or an index indicating which dimension corresponds to time (for signal elaboration). These attributes allow SignalGrad-CAM to be applied to a wide range of models.</p>
 
-The constructor displays a list of available Grad-CAM algorithms for explanation, as well as a list of layers that can be used as target for the algorithm. It also identifies any Sigmoid/Softmax layer, since its presence or absence will slightly change the algorithm's workflow.
-</p>
+<p align="justify">The constructor displays a list of available Grad-CAM algorithms for explanation (Grad-CAM and HiResCAM at the moment), as well as a list of layers that can be used as targets for the algorithm. It also identifies any Sigmoid/Softmax layer, since its presence or absence will slightly change the algorithm's workflow.</p>
 
 ```python
 import numpy as np
@@ -114,14 +113,14 @@ class_labels = ["Class 1", "Class 2", "Class 3"]
 cam_builder = TorchCamBuilder(model=model, transform_fn=preprocess_fn, class_names=class_labels, time_axs=1)
 ```
 
-<p align="justify">Now, you can use the `cam_builder` object to generate class activation maps from a list of input data using the <i>`get_cams`</i> method. You can specify multiple algorithm names, target layers, or target classes as needed.
+<p align="justify">Now, you can use the `cam_builder` object to generate class activation maps from a list of input data using the <i>`get_cams`</i> method. You can specify multiple algorithm names, target layers, or target classes as needed. As described in each function's documentation, every input (such as data and labels) needs to be arranged into a list for versatility.</p>
 
-The function's attributes allow users to customize the visualization (e.g., setting axis ticks or labels). If a result directory path is provided, the output is stored as a '.png' file; otherwise, it is displayed. In all cases, the function returns a dictionary containing the requested CAMs, along with the model's predictions and importance score ranges.
+<p align="justify">The function's attributes allow users to customize the visualization (e.g., setting axis ticks or labels). If a result directory path is provided, the output is stored as a '.png' file; otherwise, it is simply displayed. In all cases, the function returns a dictionary containing the requested CAMs, along with the model's predictions and importance score ranges.</p>
 
-Finally, several visualization tools are available to gain deeper insights into the model's behavior. The display can be customized by adjusting line width, point extension, aspect ratio, and more:
-* <i>`single_channel_output_display`</i> plots the selected channels using a color scheme that reflects the importance of each input feature.
-* <i>`overlapped_output_display`</i> superimposes CAMs onto the corresponding input in an image-like format, allowing users to capture the overall distribution of input importance.
-</p>
+<p align="justify">Finally, several visualization tools are available to gain deeper insights into the model's behavior. Their display can be customized by adjusting features like line width and point extension (for the drawing of signals and their explanations), along with others (e.g., aspect ratio) for more general tasks:</p>
+
+* <p align="justify"><i>`single_channel_output_display`</i> plots the selected input channels using a color scheme that reflects the importance of each input feature.</p>
+* <p align="justify"><i>`overlapped_output_display`</i> superimposes CAMs onto the corresponding input in an image-like format, allowing users to capture the overall distribution of input importance.</p>
 
 ```python
 # Prepare data
@@ -132,31 +131,38 @@ target_classes = [0, 1]
 
 # Create CAMs
 cam_dict, predicted_probs_dict, score_ranges_dict = cam_builder.get_cam(data_list=data_list, data_labels=data_labels_list,
-                                        target_classes=target_classes, explainer_types="Grad-CAM",
-                                        target_layers="conv1d_layer_1", softmax_final=True,
-                                        data_sampling_freq=25, dt=1, axes_names=("Time (s)", "Channels"))
+                                                                        target_classes=target_classes, explainer_types="Grad-CAM",
+                                                                        target_layers="conv1d_layer_1", softmax_final=True,
+                                                                        data_sampling_freq=25, dt=1, axes_names=("Time (s)", "Channels"))
 
 # Visualize single channel importance
 selected_channels_indices = [0, 2, 10]
 cam_builder.single_channel_output_display(data_list=data_list, data_labels=data_labels_list, predicted_probs_dict=predicted_probs_dict,
-                                        cams_dict=cam_dict, explainer_types="Grad-CAM", target_classes=target_classes,
-                                        target_layers="target_layer_name", desired_channels=selected_channels_indices,
-                                        grid_instructions=(1, len(selected_channels_indices)), bar_ranges_dict=score_ranges_dict,
-                                        results_dir="path_to_your_result_directory", data_sampling_freq=25, dt=1, line_width=0.5,
-                                        axes_names=("Time (s)", "Amplitude (mV)"))
+                                          cams_dict=cam_dict, explainer_types="Grad-CAM", target_classes=target_classes,
+                                          target_layers="target_layer_name", desired_channels=selected_channels_indices,
+                                          grid_instructions=(1, len(selected_channels_indices)), bar_ranges_dict=score_ranges_dict,
+                                          results_dir="path_to_your_result_directory", data_sampling_freq=25, dt=1, line_width=0.5,
+                                          axes_names=("Time (s)", "Amplitude (mV)"))
 
 # Visualize overall importance
 cam_builder.overlapped_output_display(data_list=data_list, data_labels=data_labels_list, predicted_probs_dict=predicted_probs_dict,
                                       cams_dict=cam_dict, explainer_types="Grad-CAM", target_classes=target_classes,
-                                      target_layers="target_layer_name", fig_size=(20 * len(your_data_X), 20),
-                                      grid_instructions=(len(your_data_X), 1), bar_ranges_dict=score_ranges_dict, data_names=item_names,
-                                      results_dir_path="path_to_your_result_directory", data_sampling_freq=25, dt=1)
+                                      target_layers="target_layer_name", fig_size=(20 * len(your_data_X), 20),
+                                      grid_instructions=(len(your_data_X), 1), bar_ranges_dict=score_ranges_dict, data_names=item_names,
+                                      results_dir_path="path_to_your_result_directory", data_sampling_freq=25, dt=1)
 ```
 
-You can also explore the Python scripts available in the examples directory of the repository [here](https://github.com/bmi-labmedinfo/signal_grad_cam/examples), which provide complete, ready-to-run demonstrations for both PyTorch and TensorFlow/Keras models. These examples include open-source models for image and signal classification using 1D- and 2D-CNN architectures, and they illustrate how to apply the recently added feature for creating and displaying "contrastive explanations" in each scenario.
+<p align="justify">You can also explore the Python scripts available in the examples directory of the repository [here](https://github.com/bmi-labmedinfo/signal_grad_cam/examples), which provide complete, ready-to-run demonstrations for both PyTorch and TensorFlow/Keras models. These examples include open-source models for signal, image, and video/volume classification using 1D, 2D, and 3D CNN architectures. Moreover, these tutorials illustrate how to deploy the recently added contrastive-explanation feature in each scenario.</p>
 
 See the [open issues](https://github.com/bmi-labmedinfo/signal_grad_cam/issues) for a full list of proposed features (and known issues).
 
+## <i>NEW!</i> Updates in SignalGrad-CAM
+<p align="justify">Compared to previous versions, SignalGrad-CAM now offers the following enhancements:</p>
+
+* <p align="justify"><i>Support for regression tasks:</i> SGrad-CAM can now handle regression-based models. Previously, substantial adjustments were required for these tasks, similar to those still needed for segmentation or generative models.</p>
+* <p align="justify"><i>Contrastive explanations:</i> Users can generate and visualize contrastive explanations by specifying one or more foil classes via the parameter <i>`contrastive_foil_classes`</i>.</p>
+* <p align="justify"><i>3D-CNN support for videos and volumetric data:</i> After specifying the time axis in the constructor with the parameter <i>`time_axs`</i>, the same functions used for 1D and 2D data now work seamlessly for 3D-CNNs. Outputs include GIF files for quick visualization of 3D activation maps. For a more detailed analysis, users can also request separate PNG images for each volume slice (across the indicated time axis) or video frame using the parameter <i>`show_single_video_frames`</i>.</p>
+
 <p align="right"><a href="#top">Back To Top</a></p>
 
 
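The third bullet in the new README section names `time_axs` and `show_single_video_frames` as the knobs for 3D inputs. A hypothetical end-to-end call for a 3D-CNN might look like the following; the toy model, data shapes, and layer name are illustrative assumptions rather than API guarantees:

```python
import numpy as np
import torch.nn as nn
from signal_grad_cam import TorchCamBuilder  # assumed public import path

# Toy 3D-CNN over (channels, time, height, width) clips; "0" below names its Conv3d module
model = nn.Sequential(nn.Conv3d(3, 8, kernel_size=3), nn.ReLU(), nn.AdaptiveAvgPool3d(1),
                      nn.Flatten(), nn.Linear(8, 2))
model.eval()

cam_builder = TorchCamBuilder(model=model, class_names=["Class 0", "Class 1"], time_axs=1)

data_list = [np.random.rand(3, 8, 32, 32).astype(np.float32) for _ in range(2)]
cam_dict, probs_dict, ranges_dict = cam_builder.get_cam(data_list=data_list, data_labels=[0, 1],
                                                        target_classes=[0], explainer_types="Grad-CAM",
                                                        target_layers="0")
# Per the release notes, 3D maps are saved as GIFs; per-frame PNGs can be requested with
# show_single_video_frames=True in the display methods (parameter named in the README above).
```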
@@ -164,7 +170,7 @@ If you use the SignalGrad-CAM software for your projects, please cite it as:
 
 ```
 @inproceedings{pe_sgradcam_2025_paper,
-  author    = {Pe, Samuele and Buonocore, Tommaso Mario and Giovanna, Nicora and Enea, Parimbelli}},
+  author    = {Pe, Samuele and Buonocore, Tommaso Mario and Giovanna, Nicora and Enea, Parimbelli},
   title     = {SignalGrad-CAM: Beyond Image Explanation},
   booktitle = {Joint Proceedings of the xAI 2025 Late-breaking Work, Demos and Doctoral Consortium co-located with the 3rd World Conference on eXplainable Artificial Intelligence (xAI 2025), Istanbul, Turkey, July 9-11, 2025},
   series    = {CEUR Workshop Proceedings},
@@ -179,9 +185,9 @@ If you use the SignalGrad-CAM software for your projects, please cite it as:
 ```
 @software{pe_sgradcam_2025_repo,
   author  = {Pe, Samuele},
-  title   = {{SignalGrad-CAM}},
+  title   = {SignalGrad-CAM},
   url     = {https://github.com/bmi-labmedinfo/signal_grad_cam},
-  version = {1.0.0},
+  version = {1.0.1},
   year    = {2025}
 }
 ```
--- /dev/null
+++ b/signal_grad_cam-2.0.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+signal_grad_cam/__init__.py,sha256=JyFFQebs1Rm5r9FCgE68Ii39lLckW8N_gj-nAL5hq2U,137
+signal_grad_cam/cam_builder.py,sha256=k57xMcvHmx1-V08P5rdCDpFaNpO5c-AjX1cIfV3EF9k,92988
+signal_grad_cam/pytorch_cam_builder.py,sha256=0qS0kiNd8uumLI0gt5jK6cxO491YLYoafMnI7Najr-Q,20490
+signal_grad_cam/tensorflow_cam_builder.py,sha256=zG12wFD2T_5_wbh_JCtfcQsuRBZ3nySNZVPlG1uIBDI,20538
+signal_grad_cam-2.0.0.dist-info/LICENSE,sha256=pCSaMipV39klP0dlf75SHw5PTl00_cLlS-EiC-LmOkw,1088
+signal_grad_cam-2.0.0.dist-info/METADATA,sha256=t5Ajz2_Br7lhNoigf1IVMkD3GtVCfyfreb_R5jH-qtc,13594
+signal_grad_cam-2.0.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+signal_grad_cam-2.0.0.dist-info/top_level.txt,sha256=S6lf3mfh2uGXJKnUS3qnw6arQu-x3gO8m82WdY7JAIA,16
+signal_grad_cam-2.0.0.dist-info/RECORD,,
--- a/signal_grad_cam-1.0.1.dist-info/WHEEL
+++ b/signal_grad_cam-2.0.0.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.41.2)
+Generator: setuptools (72.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
--- a/signal_grad_cam-1.0.1.dist-info/RECORD
+++ /dev/null
@@ -1,9 +0,0 @@
-signal_grad_cam/__init__.py,sha256=JyFFQebs1Rm5r9FCgE68Ii39lLckW8N_gj-nAL5hq2U,137
-signal_grad_cam/cam_builder.py,sha256=419Xejd3SaY09Jo6TnDhnSGJTkYxHjuHJZrRkY4yLI0,75092
-signal_grad_cam/pytorch_cam_builder.py,sha256=WAlfyiqHC9i7wQn1Xxpx8xBas_AtWhC7WnbUoKznOxk,19513
-signal_grad_cam/tensorflow_cam_builder.py,sha256=x6QZBdjuS_YwsJ6S3snLSpWray-baueBx0wvpTMcn1k,19087
-signal_grad_cam-1.0.1.dist-info/LICENSE,sha256=pCSaMipV39klP0dlf75SHw5PTl00_cLlS-EiC-LmOkw,1088
-signal_grad_cam-1.0.1.dist-info/METADATA,sha256=nEqgaq_65jYArUt3mLkGxXZMLjkou1hZOf_m1j3ncT0,11956
-signal_grad_cam-1.0.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
-signal_grad_cam-1.0.1.dist-info/top_level.txt,sha256=S6lf3mfh2uGXJKnUS3qnw6arQu-x3gO8m82WdY7JAIA,16
-signal_grad_cam-1.0.1.dist-info/RECORD,,