signal-grad-cam 0.1.3__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of signal-grad-cam might be problematic. Click here for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: signal_grad_cam
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
5
5
  Home-page: https://github.com/samuelepe11/signal_grad_cam
6
6
  Author: Samuele Pe
@@ -5,7 +5,7 @@ with open("README.md", "r") as f:
5
5
 
6
6
  setup(
7
7
  name="signal_grad_cam",
8
- version="0.1.3",
8
+ version="0.1.4",
9
9
  description="SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability"
10
10
  " and efficiency.",
11
11
  keywords="XAI, class activation maps, CNN, time series",
@@ -110,7 +110,7 @@ class CamBuilder:
110
110
  channel_names: List[str | float] = None, results_dir_path: str = None, aspect_factor: float = 100,
111
111
  data_shape_list: List[Tuple[int, int]] = None, extra_preprocess_inputs_list: List[List[Any]] = None,
112
112
  extra_inputs_list: List[Any] = None, time_names: List[str | float] = None,
113
- axes_names: Tuple[str | None, str | None] | List[str | None] = None) \
113
+ axes_names: Tuple[str | None, str | None] | List[str | None] = None, eps: float = 1e-6) \
114
114
  -> Tuple[Dict[str, List[np.ndarray]], Dict[str, np.ndarray], Dict[str, Tuple[np.ndarray, np.ndarray]]]:
115
115
  """
116
116
  Allows the user to request Class Activation Maps (CAMs) for a given list of inputs, a set of algorithms,
@@ -153,6 +153,8 @@ class CamBuilder:
153
153
  :param time_names: (optional, default is None) A list of strings representing tick names for the time axis.
154
154
  :param axes_names: (optional, default is None) A tuple of strings representing names for X and Y axes,
155
155
  respectively.
156
+ :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
157
+ to avoid null or None results.
156
158
 
157
159
  :return:
158
160
  - cams_dict: A dictionary storing a list of CAMs. Each list contains CAMs for each item in the input data
@@ -189,7 +191,8 @@ class CamBuilder:
189
191
  data_shape_list=data_shape_list,
190
192
  extra_preprocess_inputs_list=
191
193
  extra_preprocess_inputs_list,
192
- extra_inputs_list=extra_inputs_list)
194
+ extra_inputs_list=extra_inputs_list,
195
+ eps=eps)
193
196
  item_key = explainer_type + "_" + target_layer + "_class" + str(target_class)
194
197
  cams_dict.update({item_key: cam_list})
195
198
  predicted_probs_dict.update({item_key: output_probs})
@@ -456,7 +459,8 @@ class CamBuilder:
456
459
 
457
460
  def _create_raw_batched_cams(self, data_list: List[np.ndarray | torch.Tensor | tf.Tensor], target_class: int,
458
461
  target_layer: str, explainer_type: str, softmax_final: bool,
459
- extra_inputs_list: List[Any] = None) -> Tuple[List[np.ndarray], np.ndarray]:
462
+ extra_inputs_list: List[Any] = None, eps: float = 1e-6) \
463
+ -> Tuple[List[np.ndarray], np.ndarray]:
460
464
  """
461
465
  Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
462
466
  and target class). Additionally, it returns the class probabilities predicted by the model.
@@ -472,6 +476,8 @@ class CamBuilder:
472
476
  activation function.
473
477
 :param extra_inputs_list: (optional, default is None) A list of additional input objects required by the
474
478
  model's forward method.
479
+ :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
480
+ to avoid null or None results.
475
481
 
476
482
  :return:
477
483
  - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
@@ -517,7 +523,8 @@ class CamBuilder:
517
523
 
518
524
  def __create_batched_cams(self, data_list: List[np.ndarray], target_class: int, target_layer: str,
519
525
  explainer_type: str, softmax_final: bool, data_shape_list: List[Tuple[int, int]] = None,
520
- extra_preprocess_inputs_list: List[List[Any]] = None, extra_inputs_list: List[Any] = None) \
526
+ extra_preprocess_inputs_list: List[List[Any]] = None, extra_inputs_list: List[Any] = None,
527
+ eps: float = 1e-6) \
521
528
  -> Tuple[List[np.ndarray], np.ndarray, Tuple[np.ndarray, np.ndarray]]:
522
529
  """
523
530
  Prepares the input data list and retrieves CAMs based on the specified settings (defined by algorithm, target
@@ -540,7 +547,9 @@ class CamBuilder:
540
547
  represents the additional input objects required by the preprocessing method for the i-th input.
541
548
  :param extra_inputs_list: (optional, default is None) A list of additional input objects required by the model's
542
549
  forward method.
543
- 0
550
+ :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
551
+ to avoid null or None results.
552
+
544
553
  :return:
545
554
  - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
546
555
  given setting (defined by algorithm, target layer, and target class).
@@ -574,7 +583,8 @@ class CamBuilder:
574
583
  "time.")
575
584
 
576
585
  cam_list, target_probs = self._create_raw_batched_cams(data_list, target_class, target_layer, explainer_type,
577
- softmax_final, extra_inputs_list=extra_inputs_list)
586
+ softmax_final, extra_inputs_list=extra_inputs_list,
587
+ eps=eps)
578
588
  self.activations = None
579
589
  self.gradients = None
580
590
  cams = np.stack(cam_list)
@@ -897,7 +907,7 @@ class CamBuilder:
897
907
 - is_2d_layer: A boolean indicating whether the target layer is a 2D-convolutional layer.
898
908
  """
899
909
 
900
- raise ValueError(target_layer + " must be a 1D or 2D convolutional layer.")
910
+ raise ValueError(str(target_layer) + " must be a 1D or 2D convolutional layer.")
901
911
 
902
912
  @staticmethod
903
913
  def __normalize_cams(cams: np.ndarray, is_2d_layer: bool) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
@@ -125,7 +125,8 @@ class TorchCamBuilder(CamBuilder):
125
125
 
126
126
  def _create_raw_batched_cams(self, data_list: List[np.ndarray | torch.Tensor], target_class: int,
127
127
  target_layer: nn.Module, explainer_type: str, softmax_final: bool,
128
- extra_inputs_list: List[Any] = None) -> Tuple[List[np.ndarray], np.ndarray]:
128
+ extra_inputs_list: List[Any] = None, eps: float = 1e-6) \
129
+ -> Tuple[List[np.ndarray], np.ndarray]:
129
130
  """
130
131
  Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
131
132
  and target class). Additionally, it returns the class probabilities predicted by the model.
@@ -141,6 +142,8 @@ class TorchCamBuilder(CamBuilder):
141
142
  activation function.
142
143
 :param extra_inputs_list: (optional, default is None) A list of additional input objects required by the
143
144
  model's forward method.
145
+ :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
146
+ to avoid null or None results.
144
147
 
145
148
  :return:
146
149
  - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
@@ -184,30 +187,32 @@ class TorchCamBuilder(CamBuilder):
184
187
  outputs = outputs[self.model_output_index]
185
188
 
186
189
  if softmax_final:
187
- # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
188
- # during derivation. Clamp probabilities before log application to avoid null maps for maximum confidence.
189
- target_scores = torch.log(torch.clamp(outputs, min=0, max=1 - 1e-6))
190
190
  target_probs = outputs
191
-
192
- # Adjust results for binary network
193
- if len(outputs.shape) == 1:
194
- target_scores = torch.stack([-target_scores, target_scores], dim=1)
195
- target_probs = torch.stack([1 - target_probs, target_probs], dim=1)
196
- elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
197
- target_scores = torch.cat([-target_scores, target_scores], dim=1)
198
- target_probs = torch.cat([1 - target_probs, target_probs], dim=1)
191
+ if len(outputs.shape) == 2 and outputs.shape[1] > 1:
192
+ # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
193
+ # during derivation. Clamp probabilities before log application to avoid null maps for maximum
194
+ # confidence.
195
+ target_scores = torch.log(torch.clamp(outputs, min=eps, max=1 - eps))
196
+ else:
197
+ # Adjust results for binary networks
198
+ target_scores = torch.logit(outputs, eps=eps)
199
+ if len(outputs.shape) == 1:
200
+ target_scores = torch.stack([-target_scores, target_scores], dim=1)
201
+ target_probs = torch.stack([1 - target_probs, target_probs], dim=1)
202
+ else:
203
+ target_scores = torch.cat([-target_scores, target_scores], dim=1)
204
+ target_probs = torch.cat([1 - target_probs, target_probs], dim=1)
199
205
  else:
200
- if len(outputs.shape) == 1:
201
- outputs = torch.stack([-outputs, outputs], dim=1)
202
- elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
203
- outputs = torch.cat([-outputs, outputs], dim=1)
204
206
  target_scores = outputs
205
-
206
207
  if len(outputs.shape) == 2 and outputs.shape[1] > 1:
207
208
  target_probs = torch.softmax(target_scores, dim=1)
208
209
  else:
209
- tmp = torch.sigmoid(target_scores[:, 1])
210
- target_probs = torch.stack([1 - tmp, tmp], dim=1)
210
+ if len(outputs.shape) == 1:
211
+ target_scores = torch.stack([-outputs, outputs], dim=1)
212
+ elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
213
+ target_scores = torch.cat([-outputs, outputs], dim=1)
214
+ p = torch.sigmoid(outputs)
215
+ target_probs = torch.stack([1 - p, p], dim=1)
211
216
 
212
217
  target_probs = target_probs[:, target_class].cpu().detach().numpy()
213
218
 
@@ -150,7 +150,8 @@ class TfCamBuilder(CamBuilder):
150
150
 
151
151
  def _create_raw_batched_cams(self, data_list: List[np.ndarray | tf.Tensor], target_class: int,
152
152
  target_layer: tf.keras.layers.Layer, explainer_type: str, softmax_final: bool,
153
- extra_inputs_list: List[Any] = None) -> Tuple[List[np.ndarray], np.ndarray]:
153
+ extra_inputs_list: List[Any] = None, eps: float = 1e-6) \
154
+ -> Tuple[List[np.ndarray], np.ndarray]:
154
155
  """
155
156
  Retrieves raw CAMs from an input data list based on the specified settings (defined by algorithm, target layer,
156
157
  and target class). Additionally, it returns the class probabilities predicted by the model.
@@ -166,6 +167,8 @@ class TfCamBuilder(CamBuilder):
166
167
  activation function.
167
168
  :param extra_inputs_list: (optional, defaults is None) A list of additional input objects required by the
168
169
  model's forward method.
170
+ :param eps: (optional, default is 1e-6) A float number used in probability clamping before logarithm application
171
+ to avoid null or None results.
169
172
 
170
173
  :return:
171
174
  - cam_list: A list of np.ndarray containing CAMs for each item in the input data list, corresponding to the
@@ -199,31 +202,32 @@ class TfCamBuilder(CamBuilder):
199
202
  self.activations, outputs = grad_model([data_batch] + extra_inputs_list)
200
203
 
201
204
  if softmax_final:
202
- # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
203
- # during derivation. Clamp probabilities before log application to avoid null maps for maximum
204
- # confidence.
205
- target_scores = tf.math.log(tf.clip_by_value(outputs, 0, 1.0 - 1e-6))
206
205
  target_probs = outputs
207
-
208
- # Adjust results for binary network
209
- if len(outputs.shape) == 1:
210
- target_scores = tf.stack([-target_scores, target_scores], axis=1)
211
- target_probs = tf.stack([1 - target_probs, target_probs], axis=1)
212
- elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
213
- target_scores = tf.concat([-target_scores, target_scores], axis=1)
214
- target_probs = tf.concat([1 - target_probs, target_probs], axis=1)
206
+ if len(outputs.shape) == 2 and outputs.shape[1] > 1:
207
+ # Approximate Softmax inversion formula logit = log(prob) + constant, as the constant is negligible
208
+ # during derivation. Clamp probabilities before log application to avoid null maps for maximum
209
+ # confidence.
210
+ target_scores = tf.math.log(tf.clip_by_value(outputs, eps, 1.0 - eps))
211
+ else:
212
+ # Adjust results for binary network
213
+ clipped_outputs = tf.clip_by_value(outputs, eps, 1.0 - eps)
+ target_scores = tf.math.log(clipped_outputs / (1.0 - clipped_outputs))
214
+ if len(outputs.shape) == 1:
215
+ target_scores = tf.stack([-target_scores, target_scores], axis=1)
216
+ target_probs = tf.stack([1 - target_probs, target_probs], axis=1)
217
+ else:
218
+ target_scores = tf.concat([-target_scores, target_scores], axis=1)
219
+ target_probs = tf.concat([1 - target_probs, target_probs], axis=1)
215
220
  else:
216
- if len(outputs.shape) == 1:
217
- outputs = tf.stack([-outputs, outputs], axis=1)
218
- elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
219
- outputs = tf.concat([-outputs, outputs], axis=1)
220
221
  target_scores = outputs
221
-
222
222
  if len(outputs.shape) == 2 and outputs.shape[1] > 1:
223
223
  target_probs = tf.nn.softmax(target_scores, axis=1)
224
224
  else:
225
- tmp = tf.math.sigmoid(target_scores[:, 1])
226
- target_probs = tf.stack([1 - tmp, tmp], axis=1)
225
+ if len(outputs.shape) == 1:
226
+ target_scores = tf.stack([-outputs, outputs], axis=1)
227
+ elif len(outputs.shape) == 2 and outputs.shape[1] == 1:
228
+ target_scores = tf.concat([-outputs, outputs], axis=1)
229
+ p = tf.math.sigmoid(outputs)
230
+ target_probs = tf.stack([1 - p, p], axis=1)
227
231
 
228
232
  target_scores = target_scores[:, target_class]
229
233
  target_probs = target_probs[:, target_class]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: signal-grad-cam
3
- Version: 0.1.3
3
+ Version: 0.1.4
4
4
  Summary: SignalGrad-CAM aims at generalising Grad-CAM to one-dimensional applications, while enhancing usability and efficiency.
5
5
  Home-page: https://github.com/samuelepe11/signal_grad_cam
6
6
  Author: Samuele Pe
File without changes