signal-grad-cam 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,1032 @@
1
+ # Import dependencies
2
+ import os
3
+ import random
4
+ import numpy as np
5
+ import cv2
6
+ import matplotlib.pyplot as plt
7
+ import matplotlib.colors as m_colors
8
+ import re
9
+ import torch
10
+ import tensorflow as tf
11
+ from typing import Callable, List, Tuple, Dict, Any
12
+
13
+
14
+ # Class
15
+ class CamBuilder:
16
+ """
17
+ Represents a generic Class Activation Map (CAM) builder, supporting multiple methods such as Grad-CAM and HiResCAM.
18
+ """
19
+
20
+ explainer_types = {"Grad-CAM": "Gradient-weighted Class Activation Mapping",
21
+ "HiResCAM": "High-Resolution Class Activation Mapping"}
22
+
23
+ def __init__(self, model: torch.nn.Module | tf.keras.Model | Any,
24
+ transform_fn: Callable[[np.ndarray], torch.Tensor | tf.Tensor] = None,
25
+ class_names: List[str] = None, time_axs: int = 1, input_transposed: bool = False,
26
+ ignore_channel_dim: bool = False, model_output_index: int = None, extend_search: bool = False,
27
+ padding_dim: int = None, seed: int = 11):
28
+ """
29
+ Initializes the CamBuilder class. The constructor also displays, if present and retrievable, the 1D- and
30
+ 2D-convolutional layers in the network, as well as the final Sigmoid/Softmax activation. Additionally, the CAM
31
+ algorithms available for generating the explanations are shown.
32
+
33
+ :param model: (mandatory) A torch.nn.Module, tf.keras.Model, or any object (with PyTorch or TensorFlow/Keras
34
+ layers among its attributes) representing a convolutional neural network model to be explained.
35
+ Unconventional models should always be set to inference mode before being provided as inputs.
36
+ :param transform_fn: (optional, default is None) A callable function to preprocess np.ndarray data before model
37
+ evaluation. This function is also expected to convert data into either PyTorch or TensorFlow tensors.
38
+ :param class_names: (optional, default is None) A list of strings where each string represents the name of an
39
+ output class.
40
+ :param time_axs: (optional, default is 1) An integer index indicating whether the input signal's time axis is
41
+ represented as the first or second dimension of the input array.
42
+ :param input_transposed: (optional, default is False) A boolean indicating whether the input array is transposed
43
+ during model inference, either by the model itself or by the preprocessing function.
44
+ :param ignore_channel_dim: (optional, default is False) A boolean indicating whether to ignore the channel
45
+ dimension. This is useful when the model expects inputs without a singleton channel dimension.
46
+ :param model_output_index: (optional, default is None) An integer index specifying which of the model's outputs
47
+ represents output scores (or probabilities). If there is only one output, this argument can be ignored.
48
+ :param extend_search: (optional, default is False) A boolean flag indicating whether to deepen the search for
49
+ candidate layers. It should be set to True if no convolutional layer was found.
50
+ :param padding_dim: (optional, default is None) An integer specifying the maximum length along the time axis to
51
+ which each item will be padded for batching.
52
+ :param seed: (optional, default is 11) An integer seed for random number generators, used to ensure
53
+ reproducibility during model evaluation.
54
+ """
55
+
56
+ # Set seeds
57
+ np.random.seed(seed)
58
+ random.seed(seed)
59
+
60
+ # Initialize attributes
61
+ self.model = model
62
+ self.transform_fn = transform_fn
63
+ self.class_names = class_names
64
+ self.extend_search = extend_search
65
+
66
+ self.time_axs = time_axs
67
+ self.input_transposed = input_transposed
68
+ self.ignore_channel_dim = ignore_channel_dim
69
+ self.model_output_index = model_output_index
70
+ self.padding_dim = padding_dim
71
+ self.original_dims = []
72
+
73
+ self.gradients = None
74
+ self.activations = None
75
+
76
+ # Show available explainers
77
+ print()
78
+ print("====================================================================================================")
79
+ print(" Executing SignalGrad-CAM ")
80
+ print("====================================================================================================")
81
+ print()
82
+ print("AVAILABLE EXPLAINERS:")
83
+ for k, v in self.explainer_types.items():
84
+ print(f" - Explainer identifier '{k}': {v}")
85
+
86
+ # Show available 1D or 2D convolutional layers
87
+ print()
88
+ print("SEARCHING FOR NETWORK LAYERS:")
89
+ self.__print_justify("Please, verify that your network contains at least one 1D or 2D convolutional layer "
90
+ "and note the names of the layers that are of interest to you. If the desired layer is not"
91
+ " present in the following list, it can still be accessed by the name used in the network "
92
+ "to identify it.\nVerify whether the model ends with an activation function from the "
93
+ "Softmax family (such as Sigmoid o Softmax). Even if this activation function is not "
94
+ "present in the following list, ensure to check if it is applied at the end of the "
95
+ "network. Please make sure that the provided model is set in inference ('eval') mode for "
96
+ "PyTorch models and that TensorFlow/Keras models have been built (they must have the "
97
+ "specific 'inputs' and 'output' attributes)\nNetwork layers found (name: type)")
98
+ self._get_layers_pool(show=True, extend_search=extend_search)
99
+ print()
100
+
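The constructor above is typically reached through one of the concrete subclasses named in the error messages later in this file ('TorchCamBuilder' or 'TfCamBuilder'). As an illustrative, editorial sketch only — the import path, the tiny model and the preprocessing function below are hypothetical placeholders, and the subclass is assumed to keep the constructor signature documented for CamBuilder:

import numpy as np
import torch

from signal_grad_cam import TorchCamBuilder  # assumed import path; class name taken from the error messages below


class TinyNet(torch.nn.Module):
    # Hypothetical 1D-convolutional classifier, used only for illustration
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv1d(1, 8, kernel_size=3)
        self.pool = torch.nn.AdaptiveAvgPool1d(1)
        self.fc = torch.nn.Linear(8, 2)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x))).squeeze(-1)
        return self.fc(x)


def to_tensor(x: np.ndarray) -> torch.Tensor:
    # Hypothetical preprocessing: convert the array to a float32 tensor
    return torch.from_numpy(x).float()


model = TinyNet().eval()  # inference mode, as the docstring requires
builder = TorchCamBuilder(model, transform_fn=to_tensor, class_names=["negative", "positive"], time_axs=1)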
101
+ def get_cam(self, data_list: List[np.ndarray], data_labels: List[int], target_classes: int | List[int],
102
+ explainer_types: str | List[str], target_layers: str | List[str], softmax_final: bool,
103
+ data_names: List[str] = None, data_sampling_freq: float = None, dt: float = 10,
104
+ channel_names: List[str | float] = None, results_dir_path: str = None, aspect_factor: float = 100,
105
+ data_shape_list: List[Tuple[int, int]] = None, time_names: List[str | float] = None,
106
+ axes_names: Tuple[str | None, str | None] | List[str | None] = None) \
107
+ -> Tuple[Dict[str, List[np.ndarray]], Dict[str, np.ndarray], Dict[str, Tuple[np.ndarray, np.ndarray]]]:
108
+ """
109
+ Allows the user to request Class Activation Maps (CAMs) for a given list of inputs, a set of algorithms,
110
+ target classes, and target layers. Produces a standard visualization of each CAM and returns representative
111
+ outputs, enabling a customized display of CAMs. Optional inputs are employed for a more detailed
112
+ visualization of the results.
113
+
114
+ :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
115
+ :param data_labels: (mandatory) A list of integers representing the true labels of the data to be explained.
116
+ :param target_classes: (mandatory) An integer or a list of integers representing the target classes for the
117
+ explanation.
118
+ :param explainer_types: (mandatory) A string or a list of strings representing the desired algorithms for the
119
+ explanation. These strings should identify one of the CAM algorithms allowed, as listed by the class
120
+ constructor.
121
+ :param target_layers: (mandatory) A string or a list of strings representing the target layers for the
122
+ explanations. These strings should identify either PyTorch named modules, TensorFlow/Keras layers, or they
123
+ should be class dictionary keys, used to retrieve each layer from the class attributes.
124
+ :param softmax_final: (mandatory) A boolean indicating whether the network terminates with a Sigmoid/Softmax
125
+ activation function.
126
+ :param data_names: (optional, default is None) A list of strings where each string represents the name of an
127
+ input item.
128
+ :param data_sampling_freq: (optional, default is None) A numerical value representing the sampling frequency of
129
+ signal inputs in samples per second.
130
+ :param dt: (optional, default is 10) A numerical value representing the granularity of the time axis in seconds
131
+ in the output display.
132
+ :param channel_names: (optional, default is None) A list of strings where each string represents the name of a
133
+ signal channel for tick settings.
134
+ :param results_dir_path: (optional, default is None) A string representing the relative path to the directory
135
+ for storing results. If None, the output will be displayed in a figure.
136
+ :param aspect_factor: (optional, default is 100) A numerical value to set the aspect ratio of the output signal
137
+ one-dimensional CAM.
138
+ :param data_shape_list: (optional, default is None) A list of integer tuples storing the original input sizes,
139
+ used to set the CAM shape after resizing during preprocessing. The expected format is number of rows x
140
+ number of columns.
141
+ :param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
142
+ :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
143
+ respectively.
144
+
145
+ :return:
146
+ - cams_dict: A dictionary storing a list of CAMs. Each list contains CAMs for each item in the input data
147
+ list, corresponding to a given setting (defined by algorithm, target layer, and target class).
148
+ - predicted_probs_dict: A dictionary storing a np.ndarray. Each array represents the inferred class
149
+ probabilities for each item in the input list.
150
+ - bar_ranges_dict: A dictionary storing a tuple of np.ndarrays. Each tuple contains two np.ndarrays
151
+ corresponding to the minimum and maximum importance scores per CAM for each item in the input data list,
152
+ based on a given setting (defined by algorithm, target layer, and target class).
153
+ """
154
+
155
+ # Check data names
156
+ if data_names is None:
157
+ data_names = ["item" + str(i) for i in range(len(data_list))]
158
+
159
+ # Check input types
160
+ target_classes, explainer_types, target_layers = self.__check_input_types(target_classes, explainer_types,
161
+ target_layers)
162
+ for explainer_type in explainer_types:
163
+ if explainer_type not in self.explainer_types:
164
+ raise ValueError("'explainer_types' should be an explainer identifier or a list of explainer "
165
+ "identifiers.")
166
+
167
+ # Draw CAMs
168
+ cams_dict = {}
169
+ predicted_probs_dict = {}
170
+ bar_ranges_dict = {}
171
+ for explainer_type in explainer_types:
172
+ for target_class in target_classes:
173
+ for target_layer in target_layers:
174
+ cam_list, output_probs, bar_ranges = self.__create_batched_cams(data_list, target_class,
175
+ target_layer, explainer_type,
176
+ softmax_final, data_shape_list)
177
+ item_key = explainer_type + "_" + target_layer + "_class" + str(target_class)
178
+ cams_dict.update({item_key: cam_list})
179
+ predicted_probs_dict.update({item_key: output_probs})
180
+ bar_ranges_dict.update({item_key: bar_ranges})
181
+ self.__display_output(data_labels, target_class, explainer_type, target_layer, cam_list, output_probs,
182
+ results_dir_path, data_names, data_sampling_freq, dt, aspect_factor,
183
+ bar_ranges, channel_names, time_names=time_names, axes_names=axes_names)
184
+
185
+ return cams_dict, predicted_probs_dict, bar_ranges_dict
186
+
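Continuing the sketch above (editorial example, hypothetical data): a minimal get_cam call, assuming 'conv1' is one of the layer names listed by the constructor. The returned dictionaries are keyed as '<explainer>_<layer>_class<index>', mirroring the item_key built in the loop above.

signals = [np.random.randn(1, 500).astype(np.float32)]  # hypothetical single-channel signal
cams_dict, probs_dict, ranges_dict = builder.get_cam(signals, data_labels=[1], target_classes=1,
                                                     explainer_types="Grad-CAM", target_layers="conv1",
                                                     softmax_final=False, data_names=["demo"],
                                                     data_sampling_freq=100, dt=1)
cam = cams_dict["Grad-CAM_conv1_class1"][0]    # one CAM per item in data_list
prob = probs_dict["Grad-CAM_conv1_class1"][0]  # predicted probability of class 1 for the first item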
187
188
+ def overlapped_output_display(self, data_list: List[np.ndarray], data_labels: List[int],
189
+ predicted_probs_dict: Dict[str, np.ndarray], cams_dict: Dict[str, List[np.ndarray]],
190
+ explainer_types: str | List[str], target_classes: int | List[int],
191
+ target_layers: str | List[str], target_item_ids: List[int] = None,
192
+ data_names: List[str] = None, grid_instructions: Tuple[int, int] = None,
193
+ bar_ranges_dict: Dict[str, Tuple[np.ndarray, np.ndarray]] = None,
194
+ results_dir_path: str = None, data_sampling_freq: float = None, dt: float = 10,
195
+ channel_names: List[str | float] = None, time_names: List[str | float] = None,
196
+ axes_names: Tuple[str | None, str | None] | List[str | None] = None,
197
+ fig_size: Tuple[int, int] = None) -> None:
198
+ """
199
+ Generates a superimposition of the input data and the selected CAMs, useful for visualizing image explanations
200
+ and multichannel signals with numerous channels, such as frequency spectra.
201
+
202
+
203
+ :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
204
+ :param data_labels: (mandatory) A list of integers representing the true labels of the data to be explained.
205
+ :param predicted_probs_dict: (mandatory) A dictionary storing a np.ndarray. Each array represents the inferred
206
+ class probabilities for each item in the input list.
207
+ :param cams_dict: (mandatory) A dictionary storing a list of CAMs. Each list contains CAMs for each item in the
208
+ input data list, corresponding to a given setting (defined by algorithm, target layer, and target class).
209
+ :param explainer_types: (mandatory) A string or a list of strings representing the desired algorithms for the
210
+ explanation. These strings should identify one of the CAM algorithms allowed, as listed by the class
211
+ constructor.
212
+ :param target_classes: (mandatory) An integer or a list of integers representing the target classes for the
213
+ explanation.
214
+ :param target_layers: (mandatory) A string or a list of strings representing the target layers for the
215
+ explanations. These strings should identify either PyTorch named modules, TensorFlow/Keras layers, or they
216
+ should be class dictionary keys, used to retrieve each layer from the class attributes.
217
+ :param target_item_ids: (optional, default is None) A list of integers representing the target item indices
218
+ among the items in the input data list.
219
+ :param data_names: (optional, default is None) A list of strings where each string represents the name of an
220
+ input item.
221
+ :param grid_instructions: (optional, default is None) A tuple of integers defining the desired tabular layout
222
+ for figure subplots. The expected format is number of columns (width) x number of rows (height).
223
+ :param bar_ranges_dict: A dictionary storing a tuple of np.ndarrays. Each tuple contains two np.ndarrays
224
+ corresponding to the minimum and maximum importance scores per CAM for each item in the input data list,
225
+ based on a given setting (defined by algorithm, target layer, and target class).
226
+ :param results_dir_path: (optional, default is None) A string representing the relative path to the directory
227
+ for storing results. If None, the output will be displayed in a figure.
228
+ :param data_sampling_freq: (optional, default is None) A numerical value representing the sampling frequency of
229
+ signal inputs in samples per second.
230
+ :param dt: (optional, default is 10) A numerical value representing the granularity of the time axis in seconds
231
+ in the output display.
232
+ :param channel_names: (optional, default is None) A list of strings where each string represents the name of a
233
+ signal channel for tick settings.
234
+ :param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
235
+ :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
236
+ respectively.
237
+ :param fig_size: (optional, default is None) A tuple of integers defining the dimensions of the output figure.
238
+ The expected format is width x height in inches.
239
+ """
240
+
241
+ # Check input types
242
+ target_classes, explainer_types, target_layers = self.__check_input_types(target_classes, explainer_types,
243
+ target_layers)
244
+ if target_item_ids is None:
245
+ target_item_ids = list(range(len(data_list)))
246
+
247
+ # Define window size
248
+ n_items = len(target_item_ids)
249
+ w, h = self.__set_grid(n_items, grid_instructions)
250
+ if w * h < n_items:
251
+ n_items = w * h
252
+ target_item_ids = target_item_ids[:n_items]
253
+ fig_size = fig_size if fig_size is not None else (8 * w, 16 * h)
254
+
255
+ for explainer_type in explainer_types:
256
+ for target_layer in target_layers:
257
+ for target_class in target_classes:
258
+ plt.figure(figsize=fig_size)
259
+ for i in range(n_items):
260
+ cam, item, batch_idx, item_key = self.__get_data_for_plots(data_list, i, target_item_ids,
261
+ cams_dict, explainer_type,
262
+ target_layer, target_class)
263
+
264
+ plt.subplot(w, h, i + 1)
265
+ plt.imshow(item)
266
+ aspect = "auto" if cam.shape[0] / cam.shape[1] < 0.1 else None
267
+
268
+ norm = self.__get_norm(cam)
269
+ map = plt.imshow(cam, cmap="jet", aspect=aspect, norm=norm)
270
+ self.__set_colorbar(bar_ranges_dict[item_key], i)
271
+ map.set_alpha(0.3)
272
+
273
+ self.__set_axes(cam, data_sampling_freq, dt, channel_names, time_names=time_names,
274
+ axes_names=axes_names)
275
+ data_name = data_names[batch_idx] if data_names is not None else "item" + str(batch_idx)
276
+ plt.title(self.__get_cam_title(data_name, target_class, data_labels, batch_idx, item_key,
277
+ predicted_probs_dict))
278
+
279
+ # Store or show CAM
280
+ self.__display_plot(results_dir_path, explainer_type, target_layer, target_class)
281
+
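A hypothetical follow-up to the get_cam sketch above (editorial example), reusing its return values; per the docstring, grid_instructions is given as (columns, rows) and results_dir_path=None displays the figure instead of saving it:

builder.overlapped_output_display(signals, data_labels=[1],
                                  predicted_probs_dict=probs_dict, cams_dict=cams_dict,
                                  explainer_types="Grad-CAM", target_classes=1, target_layers="conv1",
                                  bar_ranges_dict=ranges_dict, grid_instructions=(1, 1),
                                  data_sampling_freq=100, dt=1, results_dir_path=None)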
282
+ def single_channel_output_display(self, data_list: List[np.ndarray], data_labels: List[int],
283
+ predicted_probs_dict: Dict[str, np.ndarray],
284
+ cams_dict: Dict[str, List[np.ndarray]], explainer_types: str | List[str],
285
+ target_classes: int | List[int], target_layers: str | List[str],
286
+ target_item_ids: List[int] = None, desired_channels: List[int] = None,
287
+ data_names: List[str] = None, grid_instructions: Tuple[int, int] = None,
288
+ bar_ranges_dict: Dict[str, Tuple[np.ndarray, np.ndarray]] = None,
289
+ results_dir_path: str = None, data_sampling_freq: float = None, dt: float = 10,
290
+ channel_names: List[str | float] = None, time_names: List[str | float] = None,
291
+ axes_names: Tuple[str | None, str | None] | List[str | None] = None,
292
+ fig_size: Tuple[int, int] = None, line_width: float = 0.1,
293
+ marker_width: float = 30) -> None:
294
+ """
295
+ Displays input signal channels, coloring each with the "jet" colormap according to the corresponding CAMs. This
296
+ visualization is useful for interpreting signal explanations with a limited number of channels. If many channels
297
+ are present, it is recommended to select only a subset.
298
+
299
+ :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
300
+ :param data_labels: (mandatory) A list of integers representing the true labels of the data to be explained.
301
+ :param predicted_probs_dict: (mandatory) A dictionary storing a np.ndarray. Each array represents the inferred
302
+ class probabilities for each item in the input list.
303
+ :param cams_dict: (mandatory) A dictionary storing a list of CAMs. Each list contains CAMs for each item in the
304
+ input data list, corresponding to a given setting (defined by algorithm, target layer, and target class).
305
+ :param explainer_types: (mandatory) A string or a list of strings representing the desired algorithms for the
306
+ explanation. These strings should identify one of the CAM algorithms allowed, as listed by the class
307
+ constructor.
308
+ :param target_classes: (mandatory) An integer or a list of integers representing the target classes for the
309
+ explanation.
310
+ :param target_layers: (mandatory) A string or a list of strings representing the target layers for the
311
+ explanations. These strings should identify either PyTorch named modules, TensorFlow/Keras layers, or they
312
+ should be class dictionary keys, used to retrieve each layer from the class attributes.
313
+ :param target_item_ids: (optional, default is None) A list of integers representing the target item indices
314
+ among the items in the input data list.
315
+ :param desired_channels: (optional, default is None) A list of integers representing the selected channels
316
+ to be displayed.
317
+ :param data_names: (optional, default is None) A list of strings where each string represents the name of an
318
+ input item.
319
+ :param grid_instructions: (optional, default is None) A tuple of integers defining the desired tabular layout
320
+ for figure subplots. The expected format is number of columns (width) x number of rows (height).
321
+ :param bar_ranges_dict: A dictionary storing a tuple of np.ndarrays. Each tuple contains two np.ndarrays
322
+ corresponding to the minimum and maximum importance scores per CAM for each item in the input data list,
323
+ based on a given setting (defined by algorithm, target layer, and target class).
324
+ :param results_dir_path: (optional, default is None) A string representing the relative path to the directory
325
+ for storing results. If None, the output will be displayed in a figure.
326
+ :param data_sampling_freq: (optional, default is None) A numerical value representing the sampling frequency of
327
+ signal inputs in samples per second.
328
+ :param dt: (optional, default is 10) A numerical value representing the granularity of the time axis in seconds
329
+ in the output display.
330
+ :param channel_names: (optional, default is None) A list of strings where each string represents the name of a
331
+ signal channel for tick settings.
332
+ :param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
333
+ :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
334
+ respectively.
335
+ :param fig_size: (optional, default is None) A tuple of integers defining the dimensions of the output figure.
336
+ The expected format is width x height in inches.
337
+ :param line_width: (optional, default is 0.1) A numerical value representing the width in typographic points of
338
+ the black interpolation lines in the plots.
339
+ :param marker_width: (optional, default is 30) A numerical value representing the size in typographic points**2
340
+ of the jet-colored markers in the plots.
341
+ """
342
+
343
+ # Check input types
344
+ target_classes, explainer_types, target_layers = self.__check_input_types(target_classes, explainer_types,
345
+ target_layers)
346
+ if desired_channels is None:
347
+ try:
348
+ desired_channels = list(range(data_list[0].shape[1]))
349
+ except IndexError:
350
+ desired_channels = [0]
351
+
352
+ if target_item_ids is None:
353
+ target_item_ids = list(range(len(data_list)))
354
+
355
+ # Define window size
356
+ n_items = len(target_item_ids)
357
+ w, h = self.__set_grid(n_items, grid_instructions)
358
+ fig_size = fig_size if fig_size is not None else (6 * w, 6 * h)
359
+ if w * h < len(list(desired_channels)):
360
+ n_channels = w * h
361
+ desired_channels = list(desired_channels)[:n_channels]
362
+
363
+ for explainer_type in explainer_types:
364
+ for target_layer in target_layers:
365
+ for target_class in target_classes:
366
+ for i in range(n_items):
367
+ plt.figure(figsize=fig_size)
368
+ cam, item, batch_idx, item_key = self.__get_data_for_plots(data_list, i, target_item_ids,
369
+ cams_dict, explainer_type,
370
+ target_layer, target_class)
371
+
372
+ # Cross-CAM normalization
373
+ minimum = np.min(cam)
374
+ maximum = np.max(cam)
375
+
376
+ data_name = data_names[batch_idx] if data_names is not None else "item" + str(batch_idx)
377
+ desired_channels = desired_channels if desired_channels is not None else range(cam.shape[1])
378
+ for j in range(len(desired_channels)):
379
+ channel = desired_channels[j]
380
+ plt.subplot(w, h, j + 1)
381
+ try:
382
+ cam_j = cam[channel, :]
383
+ except IndexError:
384
+ cam_j = cam[0, :]
385
+ item_j = item[:, channel] if item.shape[0] == len(cam_j) else item[channel, :]
386
+ plt.plot(item_j, color="black", linewidth=line_width)
387
+ plt.scatter(np.arange(len(item_j)), item_j, c=cam_j, cmap="jet", marker=".",
388
+ s=marker_width, norm=None, vmin=minimum, vmax=maximum)
389
+ self.__set_colorbar(bar_ranges_dict[item_key], i)
390
+
391
+ if channel_names is None:
392
+ channel_names = ["Channel " + str(c) for c in desired_channels]
393
+ self.__set_axes(cam, data_sampling_freq, dt, channel_names, time_names,
394
+ axes_names=axes_names, only_x=True)
395
+ plt.title(channel_names[j])
396
+ plt.suptitle(self.__get_cam_title(data_name, target_class, data_labels, batch_idx, item_key,
397
+ predicted_probs_dict))
398
+
399
+ # Store or show CAM
400
+ self.__display_plot(results_dir_path, explainer_type, target_layer, target_class, data_name,
401
+ is_channel=True)
402
+
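And the per-channel view, again as a hedged sketch built on the same hypothetical objects; desired_channels restricts the plot to the first (and only) channel of the example signal:

builder.single_channel_output_display(signals, data_labels=[1],
                                      predicted_probs_dict=probs_dict, cams_dict=cams_dict,
                                      explainer_types="Grad-CAM", target_classes=1, target_layers="conv1",
                                      desired_channels=[0], bar_ranges_dict=ranges_dict,
                                      line_width=0.5, marker_width=20)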
403
+ def _get_layers_pool(self, show: bool = False, extend_search: bool = False) \
404
+ -> Dict[str, torch.nn.Module | tf.keras.layers.Layer | Any]:
405
+ """
406
+ Retrieves a dictionary containing all the available PyTorch or TensorFlow/Keras layers (or instance attributes),
407
+ with the layer (or attribute) names used as keys.
408
+
409
+ :param show: (optional, default is False) A boolean flag indicating whether to display the retrieved layers
410
+ along with their names.
411
+ :param extend_search: (optional, default is False) A boolean flag indicating whether to deepen the search for
412
+ candidate layers. It should be set to True if no convolutional layer was found.
413
+
414
+ :return:
415
+ - layers_pool: A dictionary storing the model's PyTorch or TensorFlow/Keras layers (or instance attributes),
416
+ with layer (or attribute) names as keys.
417
+ """
418
+
419
+ layers_pool = self.model.__dict__
420
+ if show:
421
+ for name, layer in layers_pool.items():
422
+ self._show_layer(name, layer, potential=True)
423
+
424
+ return layers_pool
425
+
426
+ def _show_layer(self, name: str, layer: torch.nn.Module | tf.keras.layers.Layer | Any, potential: bool = False) \
427
+ -> None:
428
+ """
429
+ Displays a single available layer (or instance attribute) in the model, along with its corresponding name.
430
+
431
+ :param name: (mandatory) A string representing the name of the layer or attribute.
432
+ :param layer: (mandatory) A PyTorch or TensorFlow/Keras layer, or an instance attribute in the model.
433
+ :param potential: (optional, default is False) A flag indicating whether the object displayed is potentially
434
+ a layer (i.e., a generic instance attribute, not guaranteed to be a layer).
435
+ """
436
+
437
+ addon = "(potential layer) " if potential else ""
438
+ txt = " - " + addon + f"{name}:\t{type(layer).__name__}"
439
+ print(txt)
440
+
441
+ def _create_raw_batched_cams(self, data_list: List[np.ndarray], target_class: int, target_layer: str,
442
+ explainer_type: str, softmax_final: bool) \
443
+ -> Tuple[List[np.ndarray], np.ndarray]:
444
+ """
445
+ Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
446
+ and target class). Additionally, it returns the class probabilities predicted by the model.
447
+
448
+ :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
449
+ :param target_class: (mandatory) An integer representing the target class for the explanation.
450
+ :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
451
+ identify either PyTorch named modules, TensorFlow/Keras layers, or it should be a class dictionary key,
452
+ used to retrieve the layer from the class attributes.
453
+ :param explainer_type: (mandatory) A string representing the desired algorithm for the explanation. This string
454
+ should identify one of the CAM algorithms allowed, as listed by the class constructor.
455
+ :param softmax_final: (mandatory) A boolean indicating whether the network terminates with a Sigmoid/Softmax
456
+ activation function.
457
+
458
+ :return:
459
+ - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
460
+ given setting (defined by algorithm, target layer, and target class).
461
+ - target_probs: A np.ndarray, representing the inferred class probabilities for each item in the input list.
462
+ """
463
+
464
+ raise AttributeError("The method '_create_raw_batched_cams' is not available for the parent class "
465
+ "'CamBuilder': you will need to instantiate either a 'TorchCamBuilder' or a 'TfCamBuilder'"
466
+ " instance to use it.")
467
+
468
+ def _get_gradcam_map(self, is_2d_layer: bool, batch_idx: int) -> torch.Tensor | tf.Tensor:
469
+ """
470
+ Computes the CAM using the vanilla Gradient-weighted Class Activation Mapping (Grad-CAM) algorithm.
471
+
472
+ :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
473
+ :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
474
+ list.
475
+
476
+ :return: cam: A PyTorch or TensorFlow/Keras tensor representing the Class Activation Map (CAM) for the
477
+ batch_idx-th input, built with the Grad-CAM algorithm.
478
+ """
479
+
480
+ raise AttributeError("The method '__get_gradcam_map' is not available for the parent class 'CamBuilder': you "
481
+ "will need to instantiate either a 'TorchCamBuilder' or a 'TfCamBuilder' instance to use "
482
+ "it.")
483
+
484
+ def _get_hirescam_map(self, is_2d_layer: bool, batch_idx: int) -> np.ndarray:
485
+ """
486
+ Computes the CAM using the High-Resolution Class Activation Mapping (HiResCAM) algorithm.
487
+
488
+ :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
489
+ :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
490
+ list.
491
+
492
+ :return: cam: A PyTorch or TensorFlow/Keras tensor representing the Class Activation Map (CAM) for the
493
+ batch_idx-th input, built with the HiResCAM algorithm.
494
+ """
495
+
496
+ raise AttributeError("The method '__get_hirecam_map' is not available for the parent class 'CamBuilder': you "
497
+ "will need to instantiate either a 'TorchCamBuilder' or a 'TfCamBuilder' instance to use "
498
+ "it.")
499
+
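For orientation, a generic NumPy sketch of the two weighting schemes the subclasses are expected to implement in the two placeholder methods above. This is the textbook formulation of Grad-CAM and HiResCAM, not the package's exact code (for instance, whether a ReLU is applied at this stage is not visible in this file): Grad-CAM weights each activation channel by its spatially averaged gradient, while HiResCAM multiplies gradients and activations element-wise before summing over channels.

import numpy as np

def gradcam_from_arrays(activations: np.ndarray, gradients: np.ndarray) -> np.ndarray:
    # activations, gradients: (channels, length) for a 1D layer or (channels, height, width) for a 2D layer
    spatial_axes = tuple(range(1, activations.ndim))
    weights = gradients.mean(axis=spatial_axes, keepdims=True)  # global-average-pooled gradients
    return np.maximum((weights * activations).sum(axis=0), 0)   # weighted sum over channels + ReLU

def hirescam_from_arrays(activations: np.ndarray, gradients: np.ndarray) -> np.ndarray:
    return np.maximum((gradients * activations).sum(axis=0), 0)  # element-wise product summed over channels + ReLU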
500
+ def __create_batched_cams(self, data_list: List[np.ndarray], target_class: int, target_layer: str,
501
+ explainer_type: str, softmax_final: bool, data_shape_list: List[Tuple[int, int]] = None) \
502
+ -> Tuple[List[np.ndarray], np.ndarray, Tuple[np.ndarray, np.ndarray]]:
503
+ """
504
+ Prepares the input data list and retrieves CAMs based on the specified settings (defined by algorithm, target
505
+ layer, and target class), along with class probabilities predicted by the model. Additionally, it adjusts the
506
+ output CAMs in both shape and value range (0-255), and returns the original importance score range.
507
+
508
+ :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
509
+ :param target_class: (mandatory) An integer representing the target class for the explanation.
510
+ :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
511
+ identify either PyTorch named modules, TensorFlow/Keras layers, or it should be a class dictionary key,
512
+ used to retrieve the layer from the class attributes.
513
+ :param explainer_type: (mandatory) A string representing the desired algorithm for the explanation. This string
514
+ should identify one of the CAM algorithms allowed, as listed by the class constructor.
515
+ :param softmax_final: (mandatory) A boolean indicating whether the network terminates with a Sigmoid/Softmax
516
+ activation function.
517
+ :param data_shape_list: (optional, default is None) A list of integer tuples storing the original input sizes,
518
+ used to set the CAM shape after resizing during preprocessing. The expected format is number of rows x
519
+ number of columns.
520
+
521
+ :return:
522
+ - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
523
+ given setting (defined by algorithm, target layer, and target class).
524
+ - target_probs: A np.ndarray, representing the inferred class probabilities for each item in the input list.
525
+ - bar_ranges: A tuple containing two np.ndarrays, corresponding to the minimum and maximum importance scores
526
+ per CAM for each item in the input data list, based on a given setting (defined by algorithm, target
527
+ layer, and target class).
528
+ """
529
+
530
+ # Select target layer
531
+ layers_pool = self._get_layers_pool(extend_search=self.extend_search)
532
+ target_layer = layers_pool[target_layer]
533
+
534
+ # Preprocess data
535
+ if data_shape_list is None:
536
+ data_shape_list = [data_element.shape for data_element in data_list]
537
+ if self.transform_fn is not None:
538
+ data_list = [self.transform_fn(data_element) for data_element in data_list]
539
+
540
+ # Ensure data have consistent size for batching
541
+ if len(data_list) > 1 and self.padding_dim is None:
542
+ data_shape_list_processed = [data_element.shape for data_element in data_list]
543
+ if len(set(data_shape_list_processed)) != 1:  # items have differing shapes
544
+ data_list = [np.resize(x, data_shape_list_processed[0]) for x in data_list]
545
+ self.__print_justify("Input data items have different shapes. Each item has been reshaped to match the "
546
+ "first item's dimensions for batching. To prevent this, provide one item at a "
547
+ "time.")
548
+
549
+ cam_list, target_probs = self._create_raw_batched_cams(data_list, target_class, target_layer, explainer_type,
550
+ softmax_final)
551
+ self.activations = None
552
+ self.gradients = None
553
+ cams = np.stack(cam_list)
554
+ cam_list, bar_ranges = self.__adjust_maps(cams, data_shape_list, self._is_2d_layer(target_layer))
555
+ return cam_list, target_probs, bar_ranges
556
+
557
+ def __adjust_maps(self, cams: np.ndarray, data_shape_list: List[Tuple[int, int]], is_2d_layer: bool) \
558
+ -> Tuple[List[np.ndarray], Tuple[np.ndarray, np.ndarray]]:
559
+ """
560
+ Adjusts the output CAMs in both shape and value range (0-255), and returns the original importance score range.
561
+
562
+ :param cams: (mandatory) A np.ndarray representing a batch of CAMs, one per item in the input data batch.
563
+ :param data_shape_list: (mandatory) A list of integer tuples storing the original input sizes, used to set the
564
+ CAM shape after resizing during preprocessing. The expected format is number of rows x number of columns.
565
+ :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
566
+
567
+ :return:
568
+ - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
569
+ given setting (defined by algorithm, target layer, and target class).
570
+ - bar_ranges: A tuple containing two np.ndarrays, corresponding to the minimum and maximum importance scores
571
+ per CAM for each item in the input data list, based on a given setting (defined by algorithm, target
572
+ layer, and target class).
573
+ """
574
+
575
+ cams, bar_ranges = self.__normalize_cams(cams, is_2d_layer)
576
+
577
+ cam_list = []
578
+ for i in range(len(data_shape_list)):
579
+ cam = cams[i]
580
+ if is_2d_layer:
581
+ dim_reshape = (data_shape_list[i][1], data_shape_list[i][0])
582
+ if self.input_transposed:
583
+ dim_reshape = dim_reshape[::-1]
584
+ else:
585
+ dim_reshape = (1, data_shape_list[i][self.time_axs])
586
+ if self.time_axs:
587
+ cam = np.transpose(cam)
588
+ if self.padding_dim is not None:
589
+ original_dim = dim_reshape[1]
590
+ dim_reshape = (dim_reshape[0], self.padding_dim)
591
+ cam = cv2.resize(cam, dim_reshape)
592
+
593
+ if is_2d_layer and self.input_transposed:
594
+ cam = np.transpose(cam)
595
+ if self.padding_dim is not None:
596
+ cam = cam[:original_dim, :]
597
+ cam_list.append(cam)
598
+
599
+ return cam_list, bar_ranges
600
+
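One detail of the resize step above worth spelling out with a small sketch (illustrative, hypothetical sizes): cv2.resize takes its target size as (width, height), so the dim_reshape = (1, time_length) used for 1D layers produces a CAM column with one importance score per original time sample.

import cv2
import numpy as np

coarse_cam = np.random.rand(1, 32).astype(np.float32)  # hypothetical coarse CAM from a 1D layer
time_length = 500
resized = cv2.resize(coarse_cam, (1, time_length))     # dsize is (width, height)
assert resized.shape == (time_length, 1)               # a column: one score per original time sample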
601
+ def __display_output(self, data_labels: List[int], target_class: int, explainer_type: str, target_layer: str,
602
+ cam_list: List[np.ndarray], predicted_probs: np.ndarray, results_dir_path: str,
603
+ data_names: List[str], data_sampling_freq: float = None, dt: float = 10,
604
+ aspect_factor: float = 100, bar_ranges: Tuple[np.ndarray, np.ndarray] = None,
605
+ channel_names: List[str | float] = None, time_names: List[str | float] = None,
606
+ axes_names: Tuple[str | None, str | None] = None) -> None:
607
+ """
608
+ Creates plots displaying the obtained CAMs, sets their axes, and shows them as multiple figures or stores them as ".png" files.
609
+
610
+ :param data_labels: (mandatory) A list of integers representing the true labels of the data to be explained.
611
+ :param target_class: (mandatory) An integer representing the target class for the explanation.
612
+ :param explainer_type: (mandatory) A string representing the desired algorithm for the explanation. This string
613
+ should identify one of the CAM algorithms allowed, as listed by the class constructor.
614
+ :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
615
+ identify either PyTorch named modules, TensorFlow/Keras layers, or it should be a class dictionary key,
616
+ used to retrieve the layer from the class attributes.
617
+ :param cam_list: (mandatory) A list of np.ndarray containing CAMs for each item in the input data list,
618
+ corresponding to the given setting (defined by algorithm, target layer, and target class).
619
+ :param predicted_probs: (mandatory) A np.ndarray, representing the inferred class probabilities for each item in
620
+ the input list.
621
+ :param results_dir_path: (mandatory) A string representing the relative path to the directory
622
+ for storing results. If None, the output will be displayed in a figure.
623
+ :param data_names: (optional, default is None) A list of strings where each string represents the name of an
624
+ input item.
625
+ :param data_sampling_freq: (optional, default is None) A numerical value representing the sampling frequency of
626
+ signal inputs in samples per second.
627
+ :param dt: (optional, default is 10) A numerical value representing the granularity of the time axis in seconds
628
+ in the output display.
629
+ :param aspect_factor: (optional, default is 100) A numerical value to set the aspect ratio of the output signal
630
+ one-dimensional CAM.
631
+ :param bar_ranges: A tuple containing two np.ndarrays, corresponding to the minimum and maximum importance scores
632
+ per CAM for each item in the input data list, based on a given setting (defined by algorithm, target
633
+ layer, and target class).
634
+ :param channel_names: (optional, default is None) A list of strings where each string represents the name of a
635
+ signal channel for tick settings.
636
+ :param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
637
+ :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
638
+ respectively.
639
+ """
640
+
641
+ if results_dir_path is not None and not os.path.exists(results_dir_path):
642
+ os.makedirs(results_dir_path)
643
+
644
+ is_2d_layer = self._is_2d_layer(self._get_layers_pool(extend_search=self.extend_search)[target_layer])
645
+
646
+ n_cams = len(cam_list)
647
+ for i in range(n_cams):
648
+ map = cam_list[i]
649
+ data_name = data_names[i]
650
+
651
+ # Display CAM
652
+ plt.figure()
653
+ norm = self.__get_norm(map)
654
+
655
+ if map.shape[1] == 1:
656
+ aspect = int(map.shape[0] / aspect_factor)
657
+ map = np.transpose(map)
658
+ else:
659
+ if is_2d_layer:
660
+ aspect = "auto"
661
+ else:
662
+ aspect = 1
663
+ if not self.time_axs:
664
+ map = np.transpose(map)
665
+ plt.matshow(map, cmap=plt.get_cmap("jet"), norm=norm, aspect=aspect)
666
+
667
+ # Add color bar
668
+ self.__set_colorbar(bar_ranges, i)
669
+
670
+ # Set title
671
+ '''if map.shape[0] > 1 and is_2d_layer:
672
+ title_h = 1
673
+ plt.subplots_adjust(top=0.85, bottom=0.2)
674
+ else:
675
+ title_h = 0.98'''
676
+ plt.title("CAM for class '" + str(self.class_names[target_class]) + "' (confidence = " +
677
+ str(np.round(predicted_probs[i] * 100, 2)) + "%) - true label " +
678
+ str(self.class_names[data_labels[i]]))#, y=title_h)
679
+
680
+ # Set axis
681
+ self.__set_axes(map, data_sampling_freq, dt, channel_names, time_names=time_names, axes_names=axes_names)
682
+
683
+ # Store or show CAM
684
+ self.__display_plot(results_dir_path, explainer_type, target_layer, target_class, data_name)
685
+
686
+ def __get_data_for_plots(self, data_list: List[np.ndarray], i: int, target_item_ids: List[int],
687
+ cams_dict: Dict[str, List[np.ndarray]], explainer_type: str, target_layer: str,
688
+ target_class: int) -> Tuple[np.ndarray, np.ndarray, int, str]:
689
+ """
690
+ Prepares input data and CAMs to be plotted, identifying the string key to retrieve CAMs, probabilities and
691
+ ranges from the corresponding dictionaries.
692
+
693
+ :param data_list: (mandatory) A list of np.ndarrays to be explained, representing either a signal or an image.
694
+ :param i: (mandatory) An integer representing the index of an item among the selected ones.
695
+ :param target_item_ids: (optional, default is None) A list of integers representing the target item indices
696
+ among the items in the input data list.
697
+ :param cams_dict: (mandatory) A dictionary storing a list of CAMs. Each list contains CAMs for each item in the
698
+ input data list, corresponding to a given setting (defined by algorithm, target layer, and target class).
699
+ :param explainer_type: (mandatory) A string representing the desired algorithm for the explanation. This string
700
+ should identify one of the CAM algorithms allowed, as listed by the class constructor.
701
+ :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
702
+ identify either PyTorch named modules, TensorFlow/Keras layers, or it should be a class dictionary key,
703
+ used to retrieve the layer from the class attributes.
704
+ :param target_class: (mandatory) An integer representing the target class for the explanation.
705
+
706
+ :return:
707
+ - cam: The CAM for the given setting (defined by algorithm, target layer, and target class), corresponding
708
+ to the i-th item in the selected ones.
709
+ - item: The i-th item in the selected ones.
710
+ - batch_idx: The index corresponding to the i-th selected item within the original input data list.
711
+ - item_key: A string representing the considered setting (defined by algorithm, target layer, and target
712
+ class).
713
+ """
714
+ batch_idx = target_item_ids[i]
715
+ item = data_list[batch_idx]
716
+ item_key = explainer_type + "_" + target_layer + "_class" + str(target_class)
717
+ cam = cams_dict[item_key][batch_idx]
718
+
719
+ item_dims = item.shape
720
+ if len(item_dims) == 3:
721
+ if item_dims[0] == 1:
722
+ item = item[0]
723
+ elif item_dims[2] == 1:
724
+ item = item[:, :, 0]
725
+ elif len(item_dims) == 1:
726
+ item = item[np.newaxis, :]
727
+
728
+ if cam.shape[1] == 1 or (cam.shape[1] > 1 and self.time_axs == 0):
729
+ item = np.transpose(item)
730
+ cam = np.transpose(cam)
731
+
732
+ return cam, item, batch_idx, item_key
733
+
734
+ def __set_axes(self, cam: np.ndarray, data_sampling_freq: float, dt: float, channel_names: List[str | float],
735
+ time_names: List[str | float], axes_names: Tuple[str | None, str | None] = None,
736
+ only_x: bool = False) -> None:
737
+ """
738
+ Sets the axes in the plot, including both tick marks and labels.
739
+
740
+ :param cam: (mandatory) The CAM for the given setting (defined by algorithm, target layer, and target class),
741
+ corresponding to the i-th item in the selected ones.
742
+ :param data_sampling_freq: (mandatory) A numerical value representing the sampling frequency of signal inputs,
743
+ in samples per second.
744
+ :param dt: (mandatory) A numerical value representing the granularity of the time axis in seconds in the output
745
+ display.
746
+ :param channel_names: (mandatory) A list of strings where each string represents the name of a signal channel
747
+ for tick settings.
748
+ :param time_names: (mandatory) A list of strings representing tick names for the time axis.
749
+ :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
750
+ respectively.
751
+ :param only_x: (optional, default is False) A boolean flag indicating whether only the X axis should be set.
752
+ """
753
+
754
+ # Set X-axis
755
+ if data_sampling_freq is not None:
756
+ time_steps, points = self.__get_time_axis(cam, data_sampling_freq, dt)
757
+ plt.xticks(ticks=points, labels=time_steps, fontsize=8)
758
+ plt.xlabel("Time (s)")
759
+ elif data_sampling_freq is None and cam.shape[0] == 1:
760
+ plt.xlabel("Sample index")
761
+ else:
762
+ if time_names is None:
763
+ plt.xticks([], [])
764
+ else:
765
+ desired_times = len(time_names)
766
+ map_times = cam.shape[1]
767
+ desired_ticks = [c / desired_times * map_times for c in range(desired_times)]
768
+ plt.xticks(desired_ticks, time_names, fontsize=8)
769
+
770
+ # Set Y-axis
771
+ if not only_x:
772
+ if cam.shape[0] > 1 and channel_names is not None:
773
+ desired_channels = len(channel_names)
774
+ map_channels = cam.shape[0]
775
+ desired_ticks = [c / desired_channels * map_channels for c in range(desired_channels)]
776
+ plt.yticks(desired_ticks, channel_names, rotation=0, fontsize=7)
777
+ plt.ylabel("Signal channels")
778
+ else:
779
+ plt.yticks([], [])
780
+
781
+ if axes_names is not None:
782
+ if axes_names[0] is not None:
783
+ plt.xlabel(axes_names[0])
784
+ if axes_names[1] is not None:
785
+ plt.ylabel(axes_names[1])
786
+
787
+ def __get_cam_title(self, item_name: str, target_class: int, data_labels: List[int], batch_idx: int, item_key: str,
788
+ predicted_probs: Dict[str, np.ndarray]) -> str:
789
+ """
790
+ Builds the CAM title for a given item and target class.
791
+
792
+ :param item_name: (mandatory) A string representing the name of an input item.
793
+ :param target_class: (mandatory) An integer representing the target class for the explanation.
794
+ :param data_labels: (mandatory) A list of integers representing the true labels of the data to be explained.
795
+ :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
796
+ list.
797
+ :param item_key: (mandatory) A string representing the considered setting (defined by algorithm, target layer,
798
+ and target class).
799
+ :param predicted_probs: (mandatory) A dictionary storing a np.ndarray per setting. Each array represents the inferred class probabilities for each item in
800
+ the input list.
801
+
802
+ :return:
803
+ - title: A string representing the title of the CAM for a given item and target class.
804
+ """
805
+
806
+ title = ("'" + item_name + "': CAM for class '" + self.class_names[target_class] + "' (confidence = " +
807
+ str(np.round(predicted_probs[item_key][batch_idx] * 100, 2)) + "%) - true class " +
808
+ self.class_names[data_labels[batch_idx]])
809
+ return title
810
+
811
+ def __display_plot(self, results_dir_path: str, explainer_type: str, target_layer: str, target_class: int,
812
+ item_name: str = None, is_channel: bool = False) -> None:
813
+ """
814
+ Shows one CAM plot as a figure or stores it as a ".png" file.
815
+
816
+ :param results_dir_path: (mandatory) A string representing the relative path to the directory for storing
817
+ results. If None, the output will be displayed in a figure.
818
+ :param explainer_type: (mandatory) A string representing the desired algorithm for the explanation. This string
819
+ should identify one of the CAM algorithms allowed, as listed by the class constructor.
820
+ :param target_layer: (mandatory) A string representing the target layer for the explanation. This string should
821
+ identify either PyTorch named modules, TensorFlow/Keras layers, or it should be a class dictionary key,
822
+ used to retrieve the layer from the class attributes.
823
+ :param target_class: (mandatory) An integer representing the target class for the explanation.
824
+ :param item_name: (optional, default is None) A string representing the name of an input item.
825
+ :param is_channel: (optional, default is False) A boolean flag indicating whether the figure represents graphs
826
+ of multiple input channels, to discriminate it from other display modalities.
827
+ """
828
+
829
+ if is_channel:
830
+ name_addon = "channel_graphs_"
831
+ descr_addon = "single-channel "
832
+ elif item_name is None:
833
+ name_addon = "results_"
834
+ descr_addon = "overlapped "
835
+ else:
836
+ name_addon = ""
837
+ descr_addon = ""
838
+
839
+ if results_dir_path is not None:
840
+ filepath = results_dir_path
841
+ if item_name is not None:
842
+ data_name = str(item_name).replace(" ", "_").lower()
843
+ filepath = os.path.join(filepath, data_name)
844
+ if data_name not in os.listdir(results_dir_path):
845
+ os.mkdir(filepath)
846
+ filename = (name_addon + explainer_type + "_" + re.sub(r"\W", "_", target_layer) + "_class" +
847
+ str(target_class) + ".png")
848
+
849
+ # Communicate outcome
850
+ descr_addon1 = "for item '" + item_name + "' " if item_name is not None else ""
851
+ self.__print_justify("Storing " + descr_addon + "output display " + descr_addon1 + "(class " +
852
+ self.class_names[target_class] + ", layer " + target_layer + ", algorithm " + explainer_type +
853
+ ") as '" + filename + "'...")
854
+
855
+ plt.savefig(os.path.join(filepath, filename), format="png", bbox_inches="tight", pad_inches=0,
856
+ dpi=500)
857
+ plt.close()
858
+ else:
859
+ plt.show()
860
+
861
+ @staticmethod
862
+ def _is_2d_layer(target_layer: torch.nn.Module | tf.keras.layers.Layer) -> bool:
863
+ """
864
+ Evaluates whether the target layer is a 2D-convolutional layer.
865
+
866
+ :param target_layer: (mandatory) A PyTorch module or a TensorFlow/Keras layer.
867
+
868
+ :return:
869
+ - is_2d_layer: A boolean indicating whether the target layer is a 2D-convolutional layer.
870
+ """
871
+
872
+ raise ValueError(str(target_layer) + " must be a 1D or 2D convolutional layer.")
873
+
874
+ @staticmethod
875
+ def __normalize_cams(cams: np.ndarray, is_2d_layer: bool) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
876
+ """
877
+ Adjusts the CAMs in value range (0-255), and returns the original importance score range.
878
+
879
+ :param cams: (mandatory) A np.ndarray representing a batch of raw CAMs, one per item in the input data batch.
880
+ :param is_2d_layer: (mandatory) A boolean indicating whether the target layer is a 2D-convolutional layer.
881
+
882
+ :return:
883
+ - cams: A np.ndarray representing a batch of CAMs (normalised in the range 0-255), one per item in the input
884
+ data batch.
885
+ - bar_ranges: A tuple containing two np.ndarrays, corresponding to the minimum and maximum importance scores
886
+ per CAM for each item in the input data list, based on a given setting (defined by algorithm, target
887
+ layer, and target class).
888
+ """
889
+
890
+ if is_2d_layer:
891
+ axis = (1, 2)
892
+ else:
893
+ axis = 1
894
+ maxima = np.max(cams, axis=axis, keepdims=True)
895
+ minima = np.min(cams, axis=axis, keepdims=True)
896
+
897
+ is_uniform = maxima == minima
898
+ cams = np.where(is_uniform,
899
+ np.ones_like(cams) * np.where(maxima == 1, 1, 0),
900
+ np.divide(cams - minima, maxima - minima, where=(maxima - minima) != 0))
901
+
902
+ cams = np.uint8(255 * cams)
903
+ bar_ranges = (minima, maxima)
904
+ return cams, bar_ranges
905
+
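A compact numeric check of the normalisation above (editorial sketch): each CAM is min-max scaled to the 0-255 range independently, while a constant CAM collapses to all zeros (or to all 255 when the constant value is exactly 1).

import numpy as np

cam = np.array([[0.2, 0.5, 1.0, 0.2]])  # one hypothetical 1D CAM
minimum, maximum = cam.min(), cam.max()
scaled = np.uint8(255 * (cam - minimum) / (maximum - minimum))
print(scaled)  # [[  0  95 255   0]] -- 0.5 maps to 255 * 0.375 = 95.6, truncated to 95 by the uint8 cast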
906
+ @staticmethod
907
+ def __get_time_axis(cam: np.ndarray, data_sampling_freq: float, dt: float = 10) -> Tuple[List[str], np.ndarray]:
908
+ """
909
+ Returns the X axis ticks for a given CAM.
910
+
911
+ :param cam: (mandatory) A np.ndarray representing a CAM.
912
+ :param data_sampling_freq: (mandatory) A numerical value representing the sampling frequency of signal inputs,
913
+ in samples per second.
914
+ :param dt: (optional, default is 10) A numerical value representing the granularity of the time axis in seconds
915
+ in the output display.
916
+
917
+ :return:
918
+ - time_steps: A list containing the X axis ticks, in seconds.
919
+ - points: A list containing the X axis ticks, in number of samples.
920
+ """
921
+
922
+ max_p = cam.shape[1]
923
+ points = np.arange(0, max_p + 1, np.ceil(dt * data_sampling_freq))
924
+ time_steps = [str(p / data_sampling_freq) for p in points]
925
+
926
+ return time_steps, points
927
+
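A worked example of the tick computation above (illustrative values): with a 100 Hz signal and dt = 1 s, a tick is placed every ceil(1 * 100) = 100 samples and labelled with the corresponding time in seconds.

import numpy as np

data_sampling_freq, dt, max_p = 100.0, 1.0, 500
points = np.arange(0, max_p + 1, np.ceil(dt * data_sampling_freq))
time_steps = [str(p / data_sampling_freq) for p in points]
print(points)      # [  0. 100. 200. 300. 400. 500.]
print(time_steps)  # ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0']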
928
+ @staticmethod
929
+ def __set_colorbar(bar_ranges: Tuple[np.ndarray, np.ndarray] = None, batch_idx: int = None) -> None:
930
+ """
931
+ Sets the colorbar describing a CAM, representing extreme colors as minimum and maximum importance score values.
932
+
933
+ :param bar_ranges: (optional, default is None) A tuple containing two np.ndarrays, corresponding to the minimum
934
+ and maximum importance scores per CAM for each item in the input data list, based on a given setting
935
+ (defined by algorithm, target layer, and target class).
936
+ :param batch_idx: (mandatory) The index corresponding to the i-th selected item within the original input data
937
+ list.
938
+ """
939
+
940
+ bar_range = [bar_ranges[0][batch_idx], bar_ranges[1][batch_idx]]
941
+ cbar = plt.colorbar()
942
+ if bar_range is not None:
943
+ minimum = float(bar_range[0])
944
+ maximum = float(bar_range[1])
945
+ min_str = str(minimum) if minimum == 0 else "{:.2e}".format(minimum)
946
+ max_str = "" if maximum == minimum else "{:.2e}".format(maximum)
947
+ cbar.ax.get_yaxis().set_ticks([cbar.vmin, cbar.vmax], labels=[min_str, max_str])
948
+
949
+ @staticmethod
950
+ def __check_input_types(target_classes: int | List[int], explainer_types: str | List[str],
951
+ target_layers: str | List[str]) -> Tuple[List[int], List[str], List[str]]:
952
+ """
953
+ Checks whether the setting specifics (target classes, explainer algorithms, and target layers) are provided
954
+ as lists of values. If not, they are transformed into a list.
955
+
956
+ :param target_classes: (mandatory) An integer or a list of integers representing the target classes for the
957
+ explanation.
958
+ :param explainer_types: (mandatory) A string or a list of strings representing the desired algorithms for the
959
+ explanation. These strings should identify one of the CAM algorithms allowed, as listed by the class
960
+ constructor.
961
+ :param target_layers: (mandatory) A string or a list of strings representing the target layers for the
962
+ explanations. These strings should identify either PyTorch named modules, TensorFlow/Keras layers, or they
963
+ should be class dictionary keys, used to retrieve each layer from the class attributes.
964
+
965
+ :return:
966
+ - target_classes: A list of integers representing the target classes for the explanation.
967
+ - explainer_types: A list of strings representing the desired algorithms for the explanation. These strings
968
+ should identify one of the CAM algorithms allowed, as listed by the class constructor.
969
+ - target_layers: A list of strings representing the target layers for the explanations. These strings should
970
+ identify either PyTorch named modules, TensorFlow/Keras layers, or they should be class dictionary keys,
971
+ used to retrieve each layer from the class attributes.
972
+ """
973
+
974
+ if not isinstance(target_classes, list):
975
+ target_classes = [target_classes]
976
+ if not isinstance(explainer_types, list):
977
+ explainer_types = [explainer_types]
978
+ if not isinstance(target_layers, list):
979
+ target_layers = [target_layers]
980
+
981
+ return target_classes, explainer_types, target_layers
982
+
983
+ @staticmethod
984
+ def __set_grid(n_items: int, grid_instructions: Tuple[int, int]) -> Tuple[int, int]:
985
+ """
986
+ Computes number of columns (width) and number of rows (height) for the tabular layout of the figure subplots.
987
+
988
+ :param n_items: (mandatory) The number of target items among the items in the input data list.
989
+ :param grid_instructions: (optional, default is None) A tuple of integers defining the desired tabular layout
990
+ for figure subplots. The expected format is number of columns (width) x number of rows (height).
991
+
992
+ :return:
993
+ - w: The number of columns (width) of the desired tabular layout for figure subplots.
994
+ - h: The number of rows (height) of the desired tabular layout for figure subplots.
995
+ """
996
+
997
+ if grid_instructions is not None:
998
+ w, h = grid_instructions
999
+ else:
1000
+ w, h = n_items, 1
1001
+
1002
+ return w, h
1003
+
1004
+ @staticmethod
1005
+ def __get_norm(cam: np.ndarray) -> m_colors.Normalize | None:
1006
+ """
1007
+ Determines the normalization, if any, for the given CAM. The objective is to ensure that meaningless CAMs (with
1008
+ all zero values) receive a blue coloration.
1009
+
1010
+ :param cam: (mandatory) A np.ndarray representing a CAM.
1011
+
1012
+ :return:
1013
+ - norm: A matplotlib.colors.Normalize object if the CAM has all zero values, or None.
1014
+ """
1015
+
1016
+ if len(np.unique(cam)) == 1 and np.unique(cam) == 0:
1017
+ norm = m_colors.Normalize(vmin=0, vmax=255)
1018
+ else:
1019
+ norm = None
1020
+
1021
+ return norm
1022
+
1023
+ @staticmethod
1024
+ def __print_justify(text: str, n_characters: int = 100) -> None:
1025
+ """
1026
+ Prints a message wrapped to a specified line width.
1027
+
1028
+ :param text: (mandatory) Text string to be displayed.
1029
+ :param n_characters: (optional, default is 100) The number of characters allowed per line.
1030
+ """
1031
+ text = "\n".join(text[i:i + n_characters] for i in range(0, len(text), n_characters))
1032
+ print(text)
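Finally, an end-to-end sketch with a TensorFlow/Keras image model (editorial example: the import path is assumed from the wheel name, 'TfCamBuilder' is only known from the error messages above and is assumed to keep the documented constructor signature, and the model and data are hypothetical):

import numpy as np
import tensorflow as tf

from signal_grad_cam import TfCamBuilder  # assumed import path

# Hypothetical built Keras classifier with a named 2D-convolutional layer
model = tf.keras.Sequential([tf.keras.Input(shape=(64, 64, 1)),
                             tf.keras.layers.Conv2D(8, 3, activation="relu", name="conv2d_a"),
                             tf.keras.layers.GlobalAveragePooling2D(),
                             tf.keras.layers.Dense(2, activation="softmax")])

builder = TfCamBuilder(model, class_names=["background", "event"])
images = [np.random.rand(64, 64, 1).astype(np.float32)]
cams_dict, probs_dict, ranges_dict = builder.get_cam(images, data_labels=[0], target_classes=[0, 1],
                                                     explainer_types=["Grad-CAM", "HiResCAM"],
                                                     target_layers="conv2d_a", softmax_final=True,
                                                     results_dir_path="cam_results")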