pytorch-ignite 0.6.0.dev20250324__py3-none-any.whl → 0.6.0.dev20251103__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.

Potentially problematic release: this version of pytorch-ignite might be problematic.

Files changed (37)
  1. ignite/__init__.py +1 -1
  2. ignite/distributed/comp_models/native.py +1 -1
  3. ignite/engine/__init__.py +9 -9
  4. ignite/engine/engine.py +30 -4
  5. ignite/handlers/__init__.py +2 -0
  6. ignite/handlers/base_logger.py +47 -12
  7. ignite/handlers/checkpoint.py +44 -3
  8. ignite/handlers/clearml_logger.py +18 -6
  9. ignite/handlers/fbresearch_logger.py +2 -2
  10. ignite/handlers/lr_finder.py +1 -1
  11. ignite/handlers/mlflow_logger.py +43 -0
  12. ignite/handlers/neptune_logger.py +7 -0
  13. ignite/handlers/polyaxon_logger.py +7 -0
  14. ignite/handlers/tensorboard_logger.py +43 -0
  15. ignite/handlers/tqdm_logger.py +2 -3
  16. ignite/handlers/visdom_logger.py +9 -2
  17. ignite/handlers/wandb_logger.py +7 -1
  18. ignite/metrics/clustering/calinski_harabasz_score.py +1 -1
  19. ignite/metrics/clustering/silhouette_score.py +1 -1
  20. ignite/metrics/fbeta.py +17 -8
  21. ignite/metrics/gan/fid.py +3 -3
  22. ignite/metrics/js_divergence.py +1 -1
  23. ignite/metrics/maximum_mean_discrepancy.py +1 -1
  24. ignite/metrics/metric.py +2 -0
  25. ignite/metrics/nlp/bleu.py +8 -6
  26. ignite/metrics/nlp/rouge.py +3 -3
  27. ignite/metrics/nlp/utils.py +1 -1
  28. ignite/metrics/precision_recall_curve.py +5 -5
  29. ignite/metrics/regression/_base.py +4 -0
  30. ignite/metrics/regression/fractional_bias.py +1 -1
  31. ignite/metrics/roc_auc.py +3 -3
  32. ignite/metrics/ssim.py +58 -20
  33. {pytorch_ignite-0.6.0.dev20250324.dist-info → pytorch_ignite-0.6.0.dev20251103.dist-info}/METADATA +11 -17
  34. {pytorch_ignite-0.6.0.dev20250324.dist-info → pytorch_ignite-0.6.0.dev20251103.dist-info}/RECORD +36 -37
  35. {pytorch_ignite-0.6.0.dev20250324.dist-info → pytorch_ignite-0.6.0.dev20251103.dist-info}/WHEEL +1 -2
  36. pytorch_ignite-0.6.0.dev20250324.dist-info/top_level.txt +0 -1
  37. {pytorch_ignite-0.6.0.dev20250324.dist-info → pytorch_ignite-0.6.0.dev20251103.dist-info/licenses}/LICENSE +0 -0
ignite/metrics/clustering/silhouette_score.py CHANGED
@@ -86,7 +86,7 @@ class SilhouetteScore(_ClusteringMetricBase):
 
     .. testoutput::
 
-        0.12607366
+        0.1260736584663391
 
     .. versionadded:: 0.5.2
     """
ignite/metrics/fbeta.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Callable, Optional, Union
+from typing import Callable, cast, Optional, Union
 
 import torch
 
@@ -15,7 +15,7 @@ def Fbeta(
     precision: Optional[Precision] = None,
     recall: Optional[Recall] = None,
     output_transform: Optional[Callable] = None,
-    device: Union[str, torch.device] = torch.device("cpu"),
+    device: Optional[Union[str, torch.device]] = None,
 ) -> MetricsLambda:
     r"""Calculates F-beta score.
 
@@ -143,17 +143,26 @@ def Fbeta(
     if not (beta > 0):
         raise ValueError(f"Beta should be a positive integer, but given {beta}")
 
-    if precision is not None and output_transform is not None:
-        raise ValueError("If precision argument is provided, output_transform should be None")
+    if precision is not None:
+        if output_transform is not None:
+            raise ValueError("If precision argument is provided, output_transform should be None")
+        if device is not None:
+            raise ValueError("If precision argument is provided, device should be None")
 
-    if recall is not None and output_transform is not None:
-        raise ValueError("If recall argument is provided, output_transform should be None")
+    if recall is not None:
+        if output_transform is not None:
+            raise ValueError("If recall argument is provided, output_transform should be None")
+        if device is not None:
+            raise ValueError("If recall argument is provided, device should be None")
+
+    if precision is None and recall is None and device is None:
+        device = torch.device("cpu")
 
     if precision is None:
         precision = Precision(
             output_transform=(lambda x: x) if output_transform is None else output_transform,
             average=False,
-            device=device,
+            device=cast(Union[str, torch.device], recall._device if recall else device),
         )
     elif precision._average:
         raise ValueError("Input precision metric should have average=False")
@@ -162,7 +171,7 @@ def Fbeta(
         recall = Recall(
             output_transform=(lambda x: x) if output_transform is None else output_transform,
             average=False,
-            device=device,
+            device=cast(Union[str, torch.device], precision._device if precision else device),
        )
     elif recall._average:
         raise ValueError("Input recall metric should have average=False")
ignite/metrics/gan/fid.py CHANGED
@@ -31,13 +31,13 @@ def fid_score(
     except ImportError:
         raise ModuleNotFoundError("fid_score requires scipy to be installed.")
 
-    mu1, mu2 = mu1.cpu(), mu2.cpu()
-    sigma1, sigma2 = sigma1.cpu(), sigma2.cpu()
+    mu1, mu2 = mu1.detach().cpu(), mu2.detach().cpu()
+    sigma1, sigma2 = sigma1.detach().cpu(), sigma2.detach().cpu()
 
     diff = mu1 - mu2
 
     # Product might be almost singular
-    covmean, _ = scipy.linalg.sqrtm(sigma1.mm(sigma2), disp=False)
+    covmean, _ = scipy.linalg.sqrtm(sigma1.mm(sigma2).numpy(), disp=False)
     # Numerical error might give slight imaginary component
     if np.iscomplexobj(covmean):
         if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
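
The added `.detach()` calls presumably guard against statistics that still carry autograd history, since only detached tensors can be handed to NumPy/SciPy. A standalone, non-ignite sketch of that failure mode:

```python
import torch

mu = torch.zeros(3, requires_grad=True)
try:
    mu.numpy()  # RuntimeError: Can't call numpy() on Tensor that requires grad
except RuntimeError as err:
    print(err)

mu.detach().cpu().numpy()  # detaching first makes the conversion safe
```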
ignite/metrics/js_divergence.py CHANGED
@@ -73,7 +73,7 @@ class JSDivergence(KLDivergence):
 
     .. testoutput::
 
-        0.16266516844431558
+        0.1626...
 
     .. versionchanged:: 0.5.1
         ``skip_unrolling`` argument is added.
ignite/metrics/maximum_mean_discrepancy.py CHANGED
@@ -78,7 +78,7 @@ class MaximumMeanDiscrepancy(Metric):
 
     .. testoutput::
 
-        1.072697639465332
+        1.0726...
 
     .. versionchanged:: 0.5.1
         ``skip_unrolling`` argument is added.
ignite/metrics/metric.py CHANGED
@@ -361,6 +361,8 @@ class Metric(Serializable, metaclass=ABCMeta):
         device: Union[str, torch.device] = torch.device("cpu"),
         skip_unrolling: bool = False,
     ):
+        if not callable(output_transform):
+            raise TypeError(f"Argument output_transform should be callable, got {type(output_transform)}")
         self._output_transform = output_transform
 
         # Some metrics have a large performance regression when run on XLA devices, so for now, we disallow it.
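
Every metric inheriting from `Metric` gains this validation. A minimal sketch using `Accuracy` (chosen purely for illustration):

```python
from ignite.metrics import Accuracy

# The constructor now fails fast instead of deferring the error to the
# first output_transform(...) call during an engine run.
try:
    Accuracy(output_transform="not a callable")
except TypeError as err:
    print(err)  # Argument output_transform should be callable, got <class 'str'>
```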
ignite/metrics/nlp/bleu.py CHANGED
@@ -2,6 +2,7 @@ import math
 from typing import Any, Callable, Sequence, Tuple, Union
 
 import torch
+from torch import Tensor
 
 from ignite.exceptions import NotComputableError
 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
@@ -71,11 +72,11 @@ class Bleu(Metric):
 
     More details can be found in `Papineni et al. 2002`__.
 
-    __ https://www.aclweb.org/anthology/P02-1040
+    __ https://aclanthology.org/P02-1040/
 
     In addition, a review of smoothing techniques can be found in `Chen et al. 2014`__
 
-    __ https://aclanthology.org/W14-3346.pdf
+    __ https://aclanthology.org/W14-3346/
 
     - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
     - `y_pred` (list(list(str))) - a list of hypotheses sentences.
@@ -236,12 +237,12 @@ class Bleu(Metric):
     @reinit__is_reduced
     def reset(self) -> None:
         if self.average == "macro":
-            self._sum_of_bleu = torch.tensor(0.0, dtype=torch.double, device=self._device)
+            self._sum_of_bleu = torch.tensor(0.0, dtype=self._double_dtype, device=self._device)
             self._num_sentences = 0
 
         if self.average == "micro":
-            self.p_numerators = torch.zeros(self.ngrams_order + 1)
-            self.p_denominators = torch.zeros(self.ngrams_order + 1)
+            self.p_numerators = torch.zeros(self.ngrams_order + 1, dtype=self._double_dtype)
+            self.p_denominators = torch.zeros(self.ngrams_order + 1, dtype=self._double_dtype)
             self.hyp_length_sum = 0
             self.ref_length_sum = 0
 
@@ -278,8 +279,9 @@ class Bleu(Metric):
         )
         return bleu_score
 
-    def compute(self) -> None:
+    def compute(self) -> Union[None, Tensor, float]:
         if self.average == "macro":
             return self._compute_macro()
         elif self.average == "micro":
             return self._compute_micro()
+        return None
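
A minimal usage sketch for the now-typed `compute()`; the sentences are illustrative and ``ngram=2`` is chosen so the unsmoothed score is non-zero:

```python
from ignite.metrics import Bleu

bleu = Bleu(ngram=2, average="macro")
y_pred = [["the", "cat", "sat", "on", "the", "mat"]]   # one hypothesis
y = [[["the", "cat", "is", "on", "the", "mat"]]]       # its reference set
bleu.update((y_pred, y))
print(bleu.compute())  # a scalar tensor for average="macro"
```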
ignite/metrics/nlp/rouge.py CHANGED
@@ -191,7 +191,7 @@ class RougeN(_BaseRouge):
 
     More details can be found in `Lin 2004`__.
 
-    __ https://www.aclweb.org/anthology/W04-1013.pdf
+    __ https://aclanthology.org/W04-1013
 
     - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
     - `y_pred` (list(list(str))) must be a sequence of tokens.
@@ -265,7 +265,7 @@ class RougeL(_BaseRouge):
 
     More details can be found in `Lin 2004`__.
 
-    __ https://www.aclweb.org/anthology/W04-1013.pdf
+    __ https://aclanthology.org/W04-1013
 
     - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
     - `y_pred` (list(list(str))) must be a sequence of tokens.
@@ -331,7 +331,7 @@ class Rouge(Metric):
 
     More details can be found in `Lin 2004`__.
 
-    __ https://www.aclweb.org/anthology/W04-1013.pdf
+    __ https://aclanthology.org/W04-1013
 
     - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
     - `y_pred` (list(list(str))) must be a sequence of tokens.
ignite/metrics/nlp/utils.py CHANGED
@@ -63,7 +63,7 @@ def modified_precision(references: Sequence[Sequence[Any]], candidate: Any, n: i
 
     More details can be found in `Papineni et al. 2002`__.
 
-    __ https://www.aclweb.org/anthology/P02-1040.pdf
+    __ https://aclanthology.org/P02-1040
 
     Args:
         references: list of references R
ignite/metrics/precision_recall_curve.py CHANGED
@@ -97,7 +97,7 @@ class PrecisionRecallCurve(EpochMetric):
         if len(self._predictions) < 1 or len(self._targets) < 1:
             raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")
 
-        if self._result is None:  # type: ignore
+        if self._result is None:
             _prediction_tensor = torch.cat(self._predictions, dim=0)
             _target_tensor = torch.cat(self._targets, dim=0)
 
@@ -110,11 +110,11 @@ class PrecisionRecallCurve(EpochMetric):
             if idist.get_rank() == 0:
                 # Run compute_fn on zero rank only
                 precision, recall, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
-                precision = torch.tensor(precision, device=_prediction_tensor.device)
-                recall = torch.tensor(recall, device=_prediction_tensor.device)
+                precision = torch.tensor(precision, device=_prediction_tensor.device, dtype=self._double_dtype)
+                recall = torch.tensor(recall, device=_prediction_tensor.device, dtype=self._double_dtype)
                 # thresholds can have negative strides, not compatible with torch tensors
                 # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
-                thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device)
+                thresholds = torch.tensor(thresholds.copy(), device=_prediction_tensor.device, dtype=self._double_dtype)
             else:
                 precision, recall, thresholds = None, None, None
 
@@ -126,4 +126,4 @@ class PrecisionRecallCurve(EpochMetric):
 
         self._result = (precision, recall, thresholds)  # type: ignore[assignment]
 
-        return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)  # type: ignore
+        return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
ignite/metrics/regression/_base.py CHANGED
@@ -30,6 +30,10 @@ def _check_output_types(output: Tuple[torch.Tensor, torch.Tensor]) -> None:
 
 
 def _torch_median(output: torch.Tensor) -> float:
+    # torch.kthvalue used later is not supported on MPS
+    if output.device.type == "mps":
+        output = output.cpu()
+
     output = output.view(-1)
     len_ = len(output)
 
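
For context, a hypothetical standalone helper mirroring the kthvalue-based median that this guard protects (``median_via_kthvalue`` is illustrative, not the package's function, and handles odd lengths only):

```python
import torch

def median_via_kthvalue(t: torch.Tensor) -> float:
    # torch.kthvalue returns the k-th smallest element (1-indexed); it is
    # the call that lacks an MPS kernel, hence the .cpu() fallback above.
    t = t.view(-1)
    k = (len(t) + 1) // 2
    return t.kthvalue(k).values.item()

print(median_via_kthvalue(torch.tensor([3.0, 1.0, 2.0])))  # 2.0
```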
ignite/metrics/regression/fractional_bias.py CHANGED
@@ -64,7 +64,7 @@ class FractionalBias(_BaseRegression):
 
     @reinit__is_reduced
     def reset(self) -> None:
-        self._sum_of_errors = torch.tensor(0.0, dtype=torch.double, device=self._device)
+        self._sum_of_errors = torch.tensor(0.0, dtype=self._double_dtype, device=self._device)
         self._num_examples = 0
 
     def _update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
ignite/metrics/roc_auc.py CHANGED
@@ -198,9 +198,9 @@ class RocCurve(EpochMetric):
             if idist.get_rank() == 0:
                 # Run compute_fn on zero rank only
                 fpr, tpr, thresholds = cast(Tuple, self.compute_fn(_prediction_tensor, _target_tensor))
-                fpr = torch.tensor(fpr, device=_prediction_tensor.device)
-                tpr = torch.tensor(tpr, device=_prediction_tensor.device)
-                thresholds = torch.tensor(thresholds, device=_prediction_tensor.device)
+                fpr = torch.tensor(fpr, dtype=self._double_dtype, device=_prediction_tensor.device)
+                tpr = torch.tensor(tpr, dtype=self._double_dtype, device=_prediction_tensor.device)
+                thresholds = torch.tensor(thresholds, dtype=self._double_dtype, device=_prediction_tensor.device)
             else:
                 fpr, tpr, thresholds = None, None, None
 
ignite/metrics/ssim.py CHANGED
@@ -21,9 +21,9 @@ class SSIM(Metric):
 
     Args:
         data_range: Range of the image. Typically, ``1.0`` or ``255``.
-        kernel_size: Size of the kernel. Default: (11, 11)
+        kernel_size: Size of the kernel. Default: 11
         sigma: Standard deviation of the gaussian kernel.
-            Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
+            Argument is used if ``gaussian=True``. Default: 1.5
         k1: Parameter of SSIM. Default: 0.01
         k2: Parameter of SSIM. Default: 0.03
         gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
@@ -36,6 +36,7 @@ class SSIM(Metric):
         skip_unrolling: specifies whether output should be unrolled before being fed to update method. Should be
             true for multi-output model, for example, if ``y_pred`` contains multi-ouput as ``(y_pred_a, y_pred_b)``
             Alternatively, ``output_transform`` can be used to handle this.
+        ndims: Number of dimensions of the input image: 2d or 3d. Accepted values: 2, 3. Default: 2
 
     Examples:
         To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
@@ -68,6 +69,8 @@ class SSIM(Metric):
 
     .. versionchanged:: 0.5.1
         ``skip_unrolling`` argument is added.
+    .. versionchanged:: 0.5.2
+        ``ndims`` argument is added.
     """
 
     _state_dict_all_req_keys = ("_sum_of_ssim", "_num_examples", "_kernel")
@@ -75,28 +78,36 @@ class SSIM(Metric):
     def __init__(
         self,
         data_range: Union[int, float],
-        kernel_size: Union[int, Sequence[int]] = (11, 11),
-        sigma: Union[float, Sequence[float]] = (1.5, 1.5),
+        kernel_size: Union[int, Sequence[int]] = 11,
+        sigma: Union[float, Sequence[float]] = 1.5,
         k1: float = 0.01,
         k2: float = 0.03,
         gaussian: bool = True,
         output_transform: Callable = lambda x: x,
         device: Union[str, torch.device] = torch.device("cpu"),
         skip_unrolling: bool = False,
+        ndims: int = 2,
     ):
+        if ndims not in (2, 3):
+            raise ValueError(f"Expected ndims to be 2 or 3. Got {ndims}.")
+
         if isinstance(kernel_size, int):
-            self.kernel_size: Sequence[int] = [kernel_size, kernel_size]
+            self.kernel_size: Sequence[int] = [kernel_size for _ in range(ndims)]
         elif isinstance(kernel_size, Sequence):
+            if len(kernel_size) != ndims:
+                raise ValueError(f"Expected kernel_size to have length of {ndims}. Got {len(kernel_size)}.")
             self.kernel_size = kernel_size
         else:
-            raise ValueError("Argument kernel_size should be either int or a sequence of int.")
+            raise ValueError(f"Argument kernel_size should be either int or a sequence of int of length {ndims}.")
 
         if isinstance(sigma, float):
-            self.sigma: Sequence[float] = [sigma, sigma]
+            self.sigma: Sequence[float] = [sigma for _ in range(ndims)]
         elif isinstance(sigma, Sequence):
+            if len(sigma) != ndims:
+                raise ValueError(f"Expected sigma to have length of {ndims}. Got {len(sigma)}.")
             self.sigma = sigma
         else:
-            raise ValueError("Argument sigma should be either float or a sequence of float.")
+            raise ValueError(f"Argument sigma should be either float or a sequence of float of length {ndims}.")
 
         if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
             raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
@@ -111,7 +122,13 @@ class SSIM(Metric):
         self.c2 = (k2 * data_range) ** 2
         self.pad_h = (self.kernel_size[0] - 1) // 2
         self.pad_w = (self.kernel_size[1] - 1) // 2
-        self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
+        self.pad_d = None
+        self.ndims = ndims
+        if self.ndims == 3:
+            self.pad_d = (self.kernel_size[2] - 1) // 2
+        self._kernel_nd = self._gaussian_or_uniform_kernel(
+            kernel_size=self.kernel_size, sigma=self.sigma, ndims=self.ndims
+        )
         self._kernel: Optional[torch.Tensor] = None
 
     @reinit__is_reduced
@@ -128,23 +145,34 @@ class SSIM(Metric):
         min_, max_ = -2.5, 2.5
         kernel[start_uniform_index:end_uniform_index] = 1 / (max_ - min_)
 
-        return kernel.unsqueeze(dim=0)  # (1, kernel_size)
+        return kernel  # (kernel_size)
 
     def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
         ksize_half = (kernel_size - 1) * 0.5
         kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
         gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
-        return (gauss / gauss.sum()).unsqueeze(dim=0)  # (1, kernel_size)
+        return gauss / gauss.sum()  # (kernel_size)
 
-    def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
+    def _gaussian_or_uniform_kernel(
+        self, kernel_size: Sequence[int], sigma: Sequence[float], ndims: int
+    ) -> torch.Tensor:
         if self.gaussian:
             kernel_x = self._gaussian(kernel_size[0], sigma[0])
             kernel_y = self._gaussian(kernel_size[1], sigma[1])
+            if ndims == 3:
+                kernel_z = self._gaussian(kernel_size[2], sigma[2])
         else:
             kernel_x = self._uniform(kernel_size[0])
             kernel_y = self._uniform(kernel_size[1])
+            if ndims == 3:
+                kernel_z = self._uniform(kernel_size[2])
 
-        return torch.matmul(kernel_x.t(), kernel_y)  # (kernel_size, 1) * (1, kernel_size)
+        result = (
+            torch.einsum("i,j->ij", kernel_x, kernel_y)
+            if ndims == 2
+            else torch.einsum("i,j,k->ijk", kernel_x, kernel_y, kernel_z)
+        )
+        return result
 
     def _check_type_and_shape(self, y_pred: torch.Tensor, y: torch.Tensor) -> None:
         if y_pred.dtype != y.dtype:
@@ -157,9 +185,11 @@ class SSIM(Metric):
                 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
             )
 
-        if len(y_pred.shape) != 4 or len(y.shape) != 4:
+        # 2 dimensions are reserved for batch and channel
+        if len(y_pred.shape) - 2 != self.ndims or len(y.shape) - 2 != self.ndims:
             raise ValueError(
-                f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
+                "Expected y_pred and y to have BxCxHxW or BxCxDxHxW shape. "
+                f"Got y_pred: {y_pred.shape} and y: {y.shape}."
             )
 
     @reinit__is_reduced
@@ -176,7 +206,7 @@ class SSIM(Metric):
 
         nb_channel = y_pred.size(1)
         if self._kernel is None or self._kernel.shape[0] != nb_channel:
-            self._kernel = self._kernel_2d.expand(nb_channel, 1, -1, -1)
+            self._kernel = self._kernel_nd.expand(nb_channel, 1, *[-1 for _ in range(self.ndims)])
 
         if y_pred.device != self._kernel.device:
             if self._kernel.device == torch.device("cpu"):
@@ -191,14 +221,19 @@ class SSIM(Metric):
             y_pred = y_pred.to(device=self._kernel.device)
             y = y.to(device=self._kernel.device)
 
-        y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
-        y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
+        padding_shape = [self.pad_w, self.pad_w, self.pad_h, self.pad_h]
+        if self.ndims == 3 and self.pad_d is not None:
+            padding_shape.extend([self.pad_d, self.pad_d])
+
+        y_pred = F.pad(y_pred, padding_shape, mode="reflect")
+        y = F.pad(y, padding_shape, mode="reflect")
 
         if y_pred.dtype != self._kernel.dtype:
             self._kernel = self._kernel.to(dtype=y_pred.dtype)
 
         input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
-        outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=nb_channel)
+        conv_op = F.conv3d if self.ndims == 3 else F.conv2d
+        outputs = conv_op(torch.cat(input_list), self._kernel, groups=nb_channel)
         batch_size = y_pred.size(0)
         output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]
 
@@ -224,7 +259,10 @@ class SSIM(Metric):
         if ssim_idx.device.type == "mps" and self._double_dtype == torch.float64:
             double_dtype = torch.float32
 
-        self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=double_dtype).sum().to(device=self._device)
+        # mean from all dimensions except batch
+        self._sum_of_ssim += (
+            torch.mean(ssim_idx, list(range(1, 2 + self.ndims)), dtype=double_dtype).sum().to(device=self._device)
+        )
 
         self._num_examples += y.shape[0]
 
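
A minimal sketch of the new 3D path, assuming the SSIM API shown above (the shapes and ``data_range`` are illustrative):

```python
import torch
from ignite.metrics import SSIM

# BxCxDxHxW volumes are accepted once ndims=3; the scalar kernel_size/sigma
# defaults (11 and 1.5) are expanded to one value per spatial dimension.
metric = SSIM(data_range=1.0, ndims=3)
y_pred = torch.rand(2, 1, 16, 32, 32)
y = y_pred * 0.9
metric.update((y_pred, y))
print(metric.compute())
```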
{pytorch_ignite-0.6.0.dev20250324.dist-info → pytorch_ignite-0.6.0.dev20251103.dist-info}/METADATA CHANGED
@@ -1,23 +1,17 @@
-Metadata-Version: 2.2
+Metadata-Version: 2.4
 Name: pytorch-ignite
-Version: 0.6.0.dev20250324
+Version: 0.6.0.dev20251103
 Summary: A lightweight library to help with training neural networks in PyTorch.
-Home-page: https://github.com/pytorch/ignite
-Author: PyTorch-Ignite Team
-Author-email: contact@pytorch-ignite.ai
-License: BSD
-Description-Content-Type: text/markdown
+Project-URL: Homepage, https://pytorch-ignite.ai
+Project-URL: Repository, https://github.com/pytorch/ignite
+Author-email: PyTorch-Ignite Team <contact@pytorch-ignite.ai>
+License-Expression: BSD-3-Clause
 License-File: LICENSE
-Requires-Dist: torch<3,>=1.3
+Classifier: Programming Language :: Python :: 3
+Requires-Python: <=3.13,>=3.9
 Requires-Dist: packaging
-Dynamic: author
-Dynamic: author-email
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: home-page
-Dynamic: license
-Dynamic: requires-dist
-Dynamic: summary
+Requires-Dist: torch<3,>=1.10
+Description-Content-Type: text/markdown
 
 <div align="center">
 
@@ -418,7 +412,7 @@ Few pointers to get you started:
 - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pytorch/ignite/blob/master/examples/notebooks/FastaiLRFinder_MNIST.ipynb) [Basic example of LR finder on
   MNIST](https://github.com/pytorch/ignite/blob/master/examples/notebooks/FastaiLRFinder_MNIST.ipynb)
 - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pytorch/ignite/blob/master/examples/notebooks/Cifar100_bench_amp.ipynb) [Benchmark mixed precision training on Cifar100:
-  torch.cuda.amp vs nvidia/apex](https://github.com/pytorch/ignite/blob/master/examples/notebooks/Cifar100_bench_amp.ipynb)
+  torch.amp vs nvidia/apex](https://github.com/pytorch/ignite/blob/master/examples/notebooks/Cifar100_bench_amp.ipynb)
 - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pytorch/ignite/blob/master/examples/notebooks/MNIST_on_TPU.ipynb) [MNIST training on a single
   TPU](https://github.com/pytorch/ignite/blob/master/examples/notebooks/MNIST_on_TPU.ipynb)
 - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1E9zJrptnLJ_PKhmaP5Vhb6DTVRvyrKHx) [CIFAR10 Training on multiple TPUs](https://github.com/pytorch/ignite/tree/master/examples/cifar10)
{pytorch_ignite-0.6.0.dev20250324.dist-info → pytorch_ignite-0.6.0.dev20251103.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
-ignite/__init__.py,sha256=u53gJrQ1tKolHq-S8V3qTfCJ0Wnlh2uEx_j19_tjg1I,194
+ignite/__init__.py,sha256=tyL6Ulkx95oYbGTYtecqCVA424B4eABtJNaYIY5ujds,194
 ignite/_utils.py,sha256=XDPpUDJ8ykLXWMV2AYTqGSj8XCfApsyzsQ3Vij_OB4M,182
 ignite/exceptions.py,sha256=5ZWCVLPC9rgoW8t84D-VeEleqz5O7XpAGPpCdU8rKd0,150
 ignite/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -52,36 +52,36 @@ ignite/distributed/utils.py,sha256=D97JwWgL9RKP8rTfDRf1zMmfRUeJizr7XfLZ8LAScOI,2
 ignite/distributed/comp_models/__init__.py,sha256=S2WHl463U7BvpcUe9-JaGtuCi3G1cMHFW5QFBQ6fv20,1357
 ignite/distributed/comp_models/base.py,sha256=pTIylP1h2g6NWopBEponfXC6UefqS1l2lEdzTUTNXFc,14185
 ignite/distributed/comp_models/horovod.py,sha256=Mn5xG92bzBT1Dd-0IxEaN342kihUwJNkLCqsxP06ijc,9076
-ignite/distributed/comp_models/native.py,sha256=2kiHxJjjoS1GHs8GRuHcQQqnoz-yBhxGwks9Hv6SFI8,28087
+ignite/distributed/comp_models/native.py,sha256=oxJeK-Dc_dtLpG02wz29y5BvjJv_a3vEXPlRVRLWEIU,28098
 ignite/distributed/comp_models/xla.py,sha256=kVa5HrXaWt8TCLObxUMVpSsUnDv3SPOQ0dP2SlqpmLg,6281
-ignite/engine/__init__.py,sha256=ASvoTDzjl0ix7ZqHq_N3cDCNUy33gD_UUfs8-u9zni4,36126
+ignite/engine/__init__.py,sha256=MRFj6yywKhVkov4ccPkrw4dX1O8PfqceiJkngrcFb7A,36094
 ignite/engine/deterministic.py,sha256=lIacEolZPmfPgVWM_T0Eqg-2G08Wpi_hc1fifzFq0p8,11632
-ignite/engine/engine.py,sha256=zu7hpdKbmIUnGGW1uWVqxCHBseQkiNdeKAvT57A5zK8,59815
+ignite/engine/engine.py,sha256=xeUx7QkjC4p6iLMCTzFDU7Pq7PXoaoFaRf8TcpIuSog,60915
 ignite/engine/events.py,sha256=ydfG3HPMo3HKcycFSG_GrZ199Tuttcjmd85eQaV_5c0,21807
 ignite/engine/utils.py,sha256=QG5mkdg4OipspqgpNQcJuoHTYdr2Sx5LS16kfjOHDdI,1073
-ignite/handlers/__init__.py,sha256=t2UbfFcFNjR_zh3_WHoa0N3n-n_oD1jibbJw0JC3LsE,2641
-ignite/handlers/base_logger.py,sha256=3FXA0-4T2c0SXvJ1d2GE4yHN_B-Z4KTMdE4YlERZ-9U,11793
-ignite/handlers/checkpoint.py,sha256=xg0bEShzdAg7SKx8CLAPiWAy7pHi2Jm-EAQKb0NJpyo,44882
-ignite/handlers/clearml_logger.py,sha256=jZZ3t1m7LlbKZFuuMmfbf5Usaz8rf8aCnB0V5MOQCjg,37473
+ignite/handlers/__init__.py,sha256=Qq85YTtHPcii6UAfMOoCPg9RwigH96iqxOJKIlRfDqw,2728
+ignite/handlers/base_logger.py,sha256=wPiGn9iCh5ung1GaRUf_qAlqe63h1NpUUQ0XK709p2k,13011
+ignite/handlers/checkpoint.py,sha256=1k_RhDW5rjkJB4oz2jNWxjpuGgLvbNCt7_-1Pqz9Lxg,46266
+ignite/handlers/clearml_logger.py,sha256=12a9eue6hnFh5CrdSFz_EpGF0-XKRMlBXpR2NWWw8DY,37949
 ignite/handlers/early_stopping.py,sha256=UA6TiKho5CbD085R-16H8w3r0BYPQcWQjhEXg8aITSw,4139
 ignite/handlers/ema_handler.py,sha256=SmUyyWIFPZW3yMvjD_sSk5m_LfnMFl9R-uQdbXNFfY0,11854
-ignite/handlers/fbresearch_logger.py,sha256=onsUIHv6lYWcGf3VNeTWDCL_s1igK_PXmLGaTksjyk4,11120
-ignite/handlers/lr_finder.py,sha256=LdyBDQEg193mgTWJZHtZ8jP-L3giJSqSE3ffyo-wDoo,22117
-ignite/handlers/mlflow_logger.py,sha256=mMxPnBqJRcolYMtbBE8U32qneOdiwZOzOa7tEveon1Q,12311
-ignite/handlers/neptune_logger.py,sha256=LictGhcBeUNU5HrtmX3xi1qliNY1xMWPmGIgFC8Wzdo,27001
+ignite/handlers/fbresearch_logger.py,sha256=MfQeiBIXBYLEwZoDIld2oCceMeTAsz8rc5cd7fLtpJs,11133
+ignite/handlers/lr_finder.py,sha256=AAQLUcRLrfkrVOQufmRNZqAOTw1MpmAAo8YLk3JkdQs,22145
+ignite/handlers/mlflow_logger.py,sha256=M5Mggrnr2wMsms8wbEaHqNtTk5L1zNs1MlPWD0ZCpDQ,13894
+ignite/handlers/neptune_logger.py,sha256=SrehLclS8ccyuxO_0HYPvt5SN8EB8g9KWFfqQMQsGAw,27298
 ignite/handlers/param_scheduler.py,sha256=c730LIS6obDNNH2jitc2BRDK6AO36FfD3e1x336Oen4,68261
-ignite/handlers/polyaxon_logger.py,sha256=USJuycaEggP3J45fReP8-mwQeoWr-lKuGgNXJweRB0I,12056
+ignite/handlers/polyaxon_logger.py,sha256=5b7Zxhksne8Ufg_SBTG-rlf_9CPSjkBQOJR4-ynoZnQ,12354
 ignite/handlers/state_param_scheduler.py,sha256=xBOF07_JVexafmC-k4ifL_nN31IF8ThbebGWIxlbLs8,20745
 ignite/handlers/stores.py,sha256=8XM_Qqsitfu0WtOOE-K2FMtv51vD90r3GgQlCzRABYc,2616
-ignite/handlers/tensorboard_logger.py,sha256=BWcAo2dn7HCl0zVORHCKaSvtbLDBQbhkbiDEu86MRiU,26381
+ignite/handlers/tensorboard_logger.py,sha256=q3YxXkbIFayBggI_kcHyl-upttVVjjnqFOLgyjj2cRo,27967
 ignite/handlers/terminate_on_nan.py,sha256=RFSKd3Oqn9Me2xLCos4lSE-hnY7fYWWjE9blioeMlIs,2103
 ignite/handlers/time_limit.py,sha256=heTuS-ReBbOUCm1NcNJGhzxI080Hanc4hOLB2Y4GeZk,1567
 ignite/handlers/time_profilers.py,sha256=GZCoOpiFSc2yVgHQjpS1To8Yjb6G6HwydsiWMjwMQfA,30301
 ignite/handlers/timing.py,sha256=nHeBHvPwYdPRMAx-jk_8MjZit4a7rmsmIWkUrajAG-s,4705
-ignite/handlers/tqdm_logger.py,sha256=Yu8GBtTrKlO8H6_b9VS9obWJ_TIJ7p62OI_VwkGYdN4,13040
+ignite/handlers/tqdm_logger.py,sha256=5N70XA9rRm2x6sWYAJB1U5Y_bky2fa3qhec8SVgB3hY,13049
 ignite/handlers/utils.py,sha256=X4LRqo1kqGsbmX0pEuZKYR6K4C8sZudAqxCLriiXtCg,872
-ignite/handlers/visdom_logger.py,sha256=zDMjyBcVVHYExQFdxbrEXhoy37lpYuEXm_b7XL2JCEk,21551
-ignite/handlers/wandb_logger.py,sha256=0TQoOzjjexr7nE_eO3Aoo4PA5vSZJBnXK3uEuXRrtaA,14392
+ignite/handlers/visdom_logger.py,sha256=sg75ohEkDT7gYfEbLn5464GO-s0MLUEWxdFw_zSVSYw,21830
+ignite/handlers/wandb_logger.py,sha256=gGvbFNjc6gCfVFfOXcnz3-P4sqqP-P9at1UwUV_mwMg,14701
 ignite/metrics/__init__.py,sha256=m-8F8J17r-aEwsO6Ww-8AqDRN59WFfYBwCDKwqGDSmI,3627
 ignite/metrics/accumulation.py,sha256=xWdsm9u6JfsfODX_GUKzQc_omrdFDJ4yELBR-xXgc4s,12448
 ignite/metrics/accuracy.py,sha256=rI1TG-7WdJxcqGCMxGErXBWLmTNP1yczJgjjRyby0No,10168
@@ -92,50 +92,50 @@ ignite/metrics/confusion_matrix.py,sha256=dZDuK3vxrrbiQh6VfyV5aWFpuTJWsfnZ30Mxt6
 ignite/metrics/cosine_similarity.py,sha256=myq1iGFBBUgEhyOg_ZxkOqUQpS6FYAc3PAcnObc3Dp4,4429
 ignite/metrics/entropy.py,sha256=gJZkR5Sl1ZdIzJ9pFkydf1186bZU8OnkOLvOtKz6Wrs,4511
 ignite/metrics/epoch_metric.py,sha256=H4PVsDtcqk53l47Ehc3kliKT4QtyZUf600ut-8rRP8M,7050
-ignite/metrics/fbeta.py,sha256=Ioq_cscx5N8oF77B7QYkPMzGFlp0KtFtYXHgfJ1twO8,6376
+ignite/metrics/fbeta.py,sha256=2oDsRM7XXJ8LPVrn7iwLdRy75RLJELijmshtMQO3mJM,6870
 ignite/metrics/frequency.py,sha256=NW01rPgWnW1chVOSNAPCcPBu2CvjyXkoyFDAmjOK9A4,4037
 ignite/metrics/gpu_info.py,sha256=kcDIifr9js_P-32LddizEggvvL6eqFLYCHYeFDR4GL0,4301
 ignite/metrics/hsic.py,sha256=am-gor2mXY3H3u2vVNQGPJtkx_5W5JNZeukl2uYqajE,7099
-ignite/metrics/js_divergence.py,sha256=cng6KpR7I7znEEUcr434kJQ2uswgo5wbT5pAAcyVqdI,4838
+ignite/metrics/js_divergence.py,sha256=HAgj12JwL9bT33cCSAX7g4EKSfqFNNehkgwZfJuncfw,4828
 ignite/metrics/kl_divergence.py,sha256=FdC5BT-nd8nmYqT95Xozw-hW0hZC6dtTklkpJdwWJ6o,5152
 ignite/metrics/loss.py,sha256=mB-zYptymtcyIys0OlbVgUOAqL2WHT2dCPMFda-Klpo,4818
-ignite/metrics/maximum_mean_discrepancy.py,sha256=FVBt-IJNrZo_zV0vwpsjaShDhl4Z5nJWVLRjTV-GpoE,6452
+ignite/metrics/maximum_mean_discrepancy.py,sha256=AcrlYW6seQn3ZQKcnPIrLzYK2Ho0riGjuRsJmTNtCms,6444
 ignite/metrics/mean_absolute_error.py,sha256=gfbzoXNdyj9GCEzSxHXn0359TNNjnKBYshSnCBQk7i4,3695
 ignite/metrics/mean_average_precision.py,sha256=cXP9pYidQnAazGXBrhC80WoI4eK4lb3avNO5d70TLd4,19136
 ignite/metrics/mean_pairwise_distance.py,sha256=Ys6Rns6s-USS_tyP6Pa3bWZSI7f_hP5-lZM64UGJGjo,4104
 ignite/metrics/mean_squared_error.py,sha256=QdxXMYzxltfciMMRxxK5JhdlKXsdHe370EzwvIbwSmA,3679
-ignite/metrics/metric.py,sha256=IZucnzQL-HZVWa-Ppta8V1mw67XC-UP8cIA3DJwaP6o,34953
+ignite/metrics/metric.py,sha256=3dv3vy-YTgzC5aIZgLCW6pdiPzF5GqgKpqeSreEifA8,35103
 ignite/metrics/metric_group.py,sha256=UE7WrMbpKlO9_DPqxQdlmFAWveWoT1knKwRlHDl9YIU,2544
 ignite/metrics/metrics_lambda.py,sha256=NwKZ1J-KzFFbSw7YUaNJozdfKZLVqrkjQvFKT6ixnkg,7309
 ignite/metrics/multilabel_confusion_matrix.py,sha256=1pjLNPGTDJWAkN_BHdBPekcish6Ra0uRUeEbdj3Dm6Y,7377
 ignite/metrics/mutual_information.py,sha256=lu1ucVfkx01tGQfELyXzS9woCPOMVImFHfrbIXCvPe8,4692
 ignite/metrics/precision.py,sha256=xe8_e13cPMaC1Mfw-RTlmkag6pdcHCIbi70ASI1IahY,18622
-ignite/metrics/precision_recall_curve.py,sha256=2Gqv5B_Q5xP-mVlX1bmM5XZNnUTRdGG-MJZZvVcbQxc,6182
+ignite/metrics/precision_recall_curve.py,sha256=rcmG2W7dDuA_8fyekHNk4ronecewolMprW4rxUB8xsc,6228
 ignite/metrics/psnr.py,sha256=G994inwIczTWC5JfwECr0LSAtgquRGCs0283GylPR8c,5558
 ignite/metrics/recall.py,sha256=MaywS5E8ioaHZvTPGhQaYPQV-xDmptYuv8kDRe_-BEY,9867
-ignite/metrics/roc_auc.py,sha256=jcp6KpLFPr7FrSq6ePvx5lzweny0l0kFwzTS4PEBJ6M,9129
+ignite/metrics/roc_auc.py,sha256=NW_8GKX9W2tSLXn_d9G2A69gkbG62HWOc_YdyzBYO2s,9207
 ignite/metrics/root_mean_squared_error.py,sha256=yiOn5AQeg-RL2wM1MAng5Q98FHJc21chXU65tITT0Wo,2903
 ignite/metrics/running_average.py,sha256=vcC_LtsrJxEMea05TmBFzFqCK6nZd8hHavsfIlf2C6c,11333
-ignite/metrics/ssim.py,sha256=VhzEnpbpG2eQtkQKgmsVIf0IAavRckz52nIKEHcIGIM,10279
+ignite/metrics/ssim.py,sha256=_uJJdoHP4E4_sitcvFr9wTcoocK3iTxtSh_pA5J7Ss8,11766
 ignite/metrics/top_k_categorical_accuracy.py,sha256=pqsArVTSxnwt49S3lZFVqOkCXbzx-WPxfQnhtQ390RM,4706
 ignite/metrics/clustering/__init__.py,sha256=QljKwToBY-0fHblKbj1GsmP7rE5tlzHkrtw98MYEX44,233
 ignite/metrics/clustering/_base.py,sha256=lpQwtR54oTUrif7vQ7EE3ch8PJ91ECnzLov8z34gf5E,1526
-ignite/metrics/clustering/calinski_harabasz_score.py,sha256=i9DbAuFOFIgi7UVnHiiD_YHKnGgdItOyWqM-XrqLgwk,4654
+ignite/metrics/clustering/calinski_harabasz_score.py,sha256=jePNE7u72jh8RYL8Sew9rDn3BX6ydYq5Z2FPst4pqB0,4663
 ignite/metrics/clustering/davies_bouldin_score.py,sha256=VGC0jA3_gh9s4v3bm7Cw-5IV1ZUbqssYmU3s-rmnl_8,4646
-ignite/metrics/clustering/silhouette_score.py,sha256=MewWftWKR17OmkeBHLbzG_3RJs7XvSnfjal2D_3U62c,5151
+ignite/metrics/clustering/silhouette_score.py,sha256=Q9mMcyoR9woHwjxwrAPecFPhKA9bkptoKhhe5-mBfLA,5159
 ignite/metrics/gan/__init__.py,sha256=mBZQNI5uBd72iMyJs6GpbSBLEMm1-Lu1KtgmDAoH_4I,149
-ignite/metrics/gan/fid.py,sha256=QrpTNLLqw1mHPUU5_DfWpIapWH4AjlTXzFdF1IdT8So,10014
+ignite/metrics/gan/fid.py,sha256=rqITDukGd7CgQAMY8GRVPSLVrkF3MjjFR8bxE6M1kpg,10058
 ignite/metrics/gan/inception_score.py,sha256=78_qrECWb_KsbLbo1lvDnvFJ9FsWPsbUi1aKWyvp8kg,5601
 ignite/metrics/gan/utils.py,sha256=3nihbBrcM9MRcu6r0p3x5SgZQ5V4aag20ZppM7j_HiM,3993
 ignite/metrics/nlp/__init__.py,sha256=TiDKRhw7lhZeoL2Cn4s306cKIuBbXl2fizN1ZepMhwI,168
-ignite/metrics/nlp/bleu.py,sha256=O88d0-6gEm4ZztSWGkq6f2PPu3Icd8eqXUB6UJKYHmk,11424
-ignite/metrics/nlp/rouge.py,sha256=ybdmmne0Td3oWR5KX0jNSTTShsse5p_TyAFR6DerWOc,15364
-ignite/metrics/nlp/utils.py,sha256=o6zWzT8lugNAQVxJq-SEDFI35ve5-P-1TwyVu9wZCpM,2353
+ignite/metrics/nlp/bleu.py,sha256=NyQZ3CQB1xUnH_KWer5QtxkM_S_aiO3ok86UMxHaQ_w,11539
+ignite/metrics/nlp/rouge.py,sha256=pcIBCFBybJczYnPxuoLibwzNXYOMxf_JtyFiJkgo10A,15328
+ignite/metrics/nlp/utils.py,sha256=CA0MRMk9l97QockFYYhU6k0-hLhP3GwW36ONZ7TRqmc,2341
 ignite/metrics/regression/__init__.py,sha256=I594yB38ypWi9IDi9rrdshdXeBnSRcST09tnLRjN0yk,1472
-ignite/metrics/regression/_base.py,sha256=K0Xs3ZmtodhPB4GaAkFBwEgb6gbylDbCngBqsK_lbrs,2242
+ignite/metrics/regression/_base.py,sha256=5V6GkkaBYRuW9J3yDXucyTZp1XJ2uIG7F4w2XcBsd3w,2365
 ignite/metrics/regression/canberra_metric.py,sha256=HqQe-0lfwMMO5e_8hBIaAPS6PyKrIEtBKfRBNJV941Q,3077
 ignite/metrics/regression/fractional_absolute_error.py,sha256=ANQFQoadcg17ksTj_k0dY1M9E2OO8eboQCzjpRS-FNE,3259
-ignite/metrics/regression/fractional_bias.py,sha256=JKxhEaX9vINfwA1UJNIRNw3l3bonagHvj-ts8-iTDrs,3172
+ignite/metrics/regression/fractional_bias.py,sha256=IafPS6cJxhDL_OdJe2SsCtOkiwE-DVt7B2RLkY_SHpM,3178
 ignite/metrics/regression/geometric_mean_absolute_error.py,sha256=4KWSqONfKK1au4oLZDwWLJw3ENJE7rDGVNYRQYLxj1E,3195
 ignite/metrics/regression/geometric_mean_relative_absolute_error.py,sha256=vzvnt2sSqBHFaKRu0NqwzGHKwXpamhzv4YqJ4RN8CFA,4265
 ignite/metrics/regression/kendall_correlation.py,sha256=XVeqnhru0CQSXRz5wezbfWtdIqw9T20xGk_QOf8CztM,5280
@@ -153,8 +153,7 @@ ignite/metrics/regression/spearman_correlation.py,sha256=IzmN4WIe7C4cTUU3BOkBmaw
 ignite/metrics/regression/wave_hedges_distance.py,sha256=Ji_NRUgnZ3lJgi5fyNFLRjbHO648z4dBmqVDQU9ImKA,2792
 ignite/metrics/vision/__init__.py,sha256=lPBAEq1idc6Q17poFm1SjttE27irHF1-uNeiwrxnLrU,159
 ignite/metrics/vision/object_detection_average_precision_recall.py,sha256=PwdXVeGAF0sLIxUrvnnE7ZojpFNkZB5O6bYoopqc3M4,25024
-pytorch_ignite-0.6.0.dev20250324.dist-info/LICENSE,sha256=SwJvaRmy1ql-k9_nL4WnER4_ODTMF9fWoP9HXkoicgw,1527
-pytorch_ignite-0.6.0.dev20250324.dist-info/METADATA,sha256=EO1dJtV8huhPpGCp0c7WqHSoSVVDgWQZXy26uMD07WI,27997
-pytorch_ignite-0.6.0.dev20250324.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-pytorch_ignite-0.6.0.dev20250324.dist-info/top_level.txt,sha256=P2CnXR6kxvOX7ZMdd-9kVUTwLNz98t0sdjKeyvFBkR4,7
-pytorch_ignite-0.6.0.dev20250324.dist-info/RECORD,,
+pytorch_ignite-0.6.0.dev20251103.dist-info/METADATA,sha256=L4NDgHM4p5mtFYV7Fbr_OBleFmh2cfvlESJWn0QAyNo,27979
+pytorch_ignite-0.6.0.dev20251103.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pytorch_ignite-0.6.0.dev20251103.dist-info/licenses/LICENSE,sha256=SwJvaRmy1ql-k9_nL4WnER4_ODTMF9fWoP9HXkoicgw,1527
+pytorch_ignite-0.6.0.dev20251103.dist-info/RECORD,,