pytorch-ignite 0.6.0.dev20251116__py3-none-any.whl → 0.6.0.dev20260102__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pytorch-ignite might be problematic.

Files changed (62)
  1. ignite/__init__.py +1 -1
  2. ignite/contrib/engines/common.py +1 -0
  3. ignite/contrib/handlers/base_logger.py +1 -1
  4. ignite/contrib/handlers/clearml_logger.py +1 -1
  5. ignite/contrib/handlers/lr_finder.py +1 -1
  6. ignite/contrib/handlers/mlflow_logger.py +1 -1
  7. ignite/contrib/handlers/neptune_logger.py +1 -1
  8. ignite/contrib/handlers/param_scheduler.py +1 -1
  9. ignite/contrib/handlers/polyaxon_logger.py +1 -1
  10. ignite/contrib/handlers/tensorboard_logger.py +1 -1
  11. ignite/contrib/handlers/time_profilers.py +1 -1
  12. ignite/contrib/handlers/tqdm_logger.py +1 -1
  13. ignite/contrib/handlers/visdom_logger.py +1 -1
  14. ignite/contrib/handlers/wandb_logger.py +1 -1
  15. ignite/contrib/metrics/average_precision.py +1 -1
  16. ignite/contrib/metrics/cohen_kappa.py +1 -1
  17. ignite/contrib/metrics/gpu_info.py +1 -1
  18. ignite/contrib/metrics/precision_recall_curve.py +1 -1
  19. ignite/contrib/metrics/regression/canberra_metric.py +2 -3
  20. ignite/contrib/metrics/regression/fractional_absolute_error.py +2 -3
  21. ignite/contrib/metrics/regression/fractional_bias.py +2 -3
  22. ignite/contrib/metrics/regression/geometric_mean_absolute_error.py +2 -3
  23. ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py +2 -3
  24. ignite/contrib/metrics/regression/manhattan_distance.py +2 -3
  25. ignite/contrib/metrics/regression/maximum_absolute_error.py +2 -3
  26. ignite/contrib/metrics/regression/mean_absolute_relative_error.py +2 -3
  27. ignite/contrib/metrics/regression/mean_error.py +2 -3
  28. ignite/contrib/metrics/regression/mean_normalized_bias.py +2 -3
  29. ignite/contrib/metrics/regression/median_absolute_error.py +2 -3
  30. ignite/contrib/metrics/regression/median_absolute_percentage_error.py +2 -3
  31. ignite/contrib/metrics/regression/median_relative_absolute_error.py +2 -3
  32. ignite/contrib/metrics/regression/r2_score.py +2 -3
  33. ignite/contrib/metrics/regression/wave_hedges_distance.py +2 -3
  34. ignite/contrib/metrics/roc_auc.py +1 -1
  35. ignite/distributed/auto.py +1 -0
  36. ignite/distributed/comp_models/horovod.py +8 -1
  37. ignite/distributed/comp_models/native.py +1 -0
  38. ignite/distributed/comp_models/xla.py +2 -0
  39. ignite/distributed/launcher.py +4 -8
  40. ignite/engine/deterministic.py +1 -1
  41. ignite/engine/engine.py +9 -11
  42. ignite/engine/events.py +2 -1
  43. ignite/handlers/checkpoint.py +2 -2
  44. ignite/handlers/clearml_logger.py +2 -2
  45. ignite/handlers/lr_finder.py +10 -10
  46. ignite/handlers/neptune_logger.py +1 -0
  47. ignite/handlers/param_scheduler.py +7 -3
  48. ignite/handlers/state_param_scheduler.py +8 -2
  49. ignite/handlers/time_profilers.py +6 -3
  50. ignite/handlers/tqdm_logger.py +7 -2
  51. ignite/handlers/visdom_logger.py +2 -2
  52. ignite/handlers/wandb_logger.py +9 -8
  53. ignite/metrics/accuracy.py +2 -0
  54. ignite/metrics/metric.py +1 -0
  55. ignite/metrics/nlp/rouge.py +6 -3
  56. ignite/metrics/roc_auc.py +1 -0
  57. ignite/metrics/ssim.py +4 -0
  58. ignite/metrics/vision/object_detection_average_precision_recall.py +3 -0
  59. {pytorch_ignite-0.6.0.dev20251116.dist-info → pytorch_ignite-0.6.0.dev20260102.dist-info}/METADATA +1 -1
  60. {pytorch_ignite-0.6.0.dev20251116.dist-info → pytorch_ignite-0.6.0.dev20260102.dist-info}/RECORD +62 -62
  61. {pytorch_ignite-0.6.0.dev20251116.dist-info → pytorch_ignite-0.6.0.dev20260102.dist-info}/WHEEL +1 -1
  62. {pytorch_ignite-0.6.0.dev20251116.dist-info → pytorch_ignite-0.6.0.dev20260102.dist-info}/licenses/LICENSE +0 -0
@@ -862,7 +862,7 @@ class ClearMLSaver(DiskSaver):
         except ImportError:
             try:
                 # Backwards-compatibility for legacy Trains SDK
-                from trains import Task  # type: ignore[no-redef]
+                from trains import Task
             except ImportError:
                 raise ModuleNotFoundError(
                     "This contrib module requires clearml to be installed. "
@@ -937,7 +937,7 @@ class ClearMLSaver(DiskSaver):
         except ImportError:
             try:
                 # Backwards-compatibility for legacy Trains SDK
-                from trains.binding.frameworks import WeightsFileHandler  # type: ignore[no-redef]
+                from trains.binding.frameworks import WeightsFileHandler
             except ImportError:
                 raise ModuleNotFoundError(
                     "This contrib module requires clearml to be installed. "
@@ -98,15 +98,18 @@ class FastaiLRFinder:
         self._best_loss = None
         self._diverge_flag = False
 
+        assert trainer.state.epoch_length is not None
+        assert trainer.state.max_epochs is not None
+
         # attach LRScheduler to trainer.
         if num_iter is None:
             num_iter = trainer.state.epoch_length * trainer.state.max_epochs
         else:
-            max_iter = trainer.state.epoch_length * trainer.state.max_epochs  # type: ignore[operator]
+            max_iter = trainer.state.epoch_length * trainer.state.max_epochs
             if max_iter < num_iter:
                 max_iter = num_iter
                 trainer.state.max_iters = num_iter
-                trainer.state.max_epochs = ceil(num_iter / trainer.state.epoch_length)  # type: ignore[operator]
+                trainer.state.max_epochs = ceil(num_iter / trainer.state.epoch_length)
 
         if not trainer.has_event_handler(self._reached_num_iterations):
             trainer.add_event_handler(Events.ITERATION_COMPLETED, self._reached_num_iterations, num_iter)
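
Note: the two added asserts narrow the Optional `epoch_length` / `max_epochs` state attributes, which is what lets the `# type: ignore[operator]` suppressions below them be dropped. A minimal standalone sketch of that narrowing pattern (the `_FakeState` class is hypothetical, not ignite code):

    from math import ceil
    from typing import Optional

    class _FakeState:
        # hypothetical stand-in for trainer.state with Optional fields
        def __init__(self) -> None:
            self.epoch_length: Optional[int] = 100
            self.max_epochs: Optional[int] = 5

    def plan_iterations(state: _FakeState, num_iter: Optional[int]) -> int:
        # after these asserts a type checker treats both fields as plain int
        assert state.epoch_length is not None
        assert state.max_epochs is not None
        if num_iter is None:
            num_iter = state.epoch_length * state.max_epochs
        elif state.epoch_length * state.max_epochs < num_iter:
            # stretch max_epochs so the requested number of iterations fits
            state.max_epochs = ceil(num_iter / state.epoch_length)
        return num_iter

    print(plan_iterations(_FakeState(), None))  # 500
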
@@ -178,17 +181,14 @@ class FastaiLRFinder:
         loss = idist.all_reduce(loss)
         lr = self._lr_schedule.get_param()
         self._history["lr"].append(lr)
-        if trainer.state.iteration == 1:
-            self._best_loss = loss  # type: ignore[assignment]
-        else:
-            if smooth_f > 0:
-                loss = smooth_f * loss + (1 - smooth_f) * self._history["loss"][-1]
-            if loss < self._best_loss:
-                self._best_loss = loss
+        if trainer.state.iteration != 1 and smooth_f > 0:
+            loss = smooth_f * loss + (1 - smooth_f) * self._history["loss"][-1]
+        if self._best_loss is None or loss < self._best_loss:
+            self._best_loss = loss
         self._history["loss"].append(loss)
 
         # Check if the loss has diverged; if it has, stop the trainer
-        if self._history["loss"][-1] > diverge_th * self._best_loss:  # type: ignore[operator]
+        if self._history["loss"][-1] > diverge_th * self._best_loss:
             self._diverge_flag = True
             self.logger.info("Stopping early, the loss has diverged")
             trainer.terminate()
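
Note: the rework above replaces the `iteration == 1` special case with a `None` check on `_best_loss`, so the first assignment no longer needs a type suppression and smoothing applies uniformly after the first iteration. A hedged, standalone sketch of the bookkeeping (simplified; not the ignite implementation):

    from typing import List, Optional, Tuple

    def track_loss(history: List[float], loss: float, best: Optional[float],
                   smooth_f: float = 0.05, diverge_th: float = 5.0) -> Tuple[float, bool]:
        if history and smooth_f > 0:  # not the first recorded iteration
            loss = smooth_f * loss + (1 - smooth_f) * history[-1]
        if best is None or loss < best:
            best = loss
        history.append(loss)
        diverged = history[-1] > diverge_th * best
        return best, diverged

    history: List[float] = []
    best: Optional[float] = None
    for raw_loss in [1.0, 0.8, 0.7, 100.0]:
        best, diverged = track_loss(history, raw_loss, best)
    print(round(best, 4), diverged)  # the smoothed best stays low; the final spike trips the divergence check
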
@@ -698,6 +698,7 @@ class NeptuneSaver(BaseSaveHandler):
             # hold onto the file stream for uploading.
             # NOTE: This won't load the whole file in memory and upload
             # the stream in smaller chunks.
+            # pyrefly: ignore [bad-argument-type]
             self._logger[filename].upload(File.from_stream(tmp.file))
 
     @idist.one_rank_only(with_barrier=True)
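
Note: this release swaps several mypy-style `# type: ignore[...]` comments for `# pyrefly: ignore [...]` suppressions, which sit on the line directly above the expression they silence. A tiny illustrative placement sketch (the `upload` function is hypothetical):

    def upload(stream: bytes) -> None:  # hypothetical API with a stricter annotation
        ...

    with open(__file__, "rb") as f:
        # pyrefly: ignore [bad-argument-type]
        upload(f)  # passing a file object where bytes are annotated would otherwise be flagged
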
@@ -1122,13 +1122,14 @@ def create_lr_scheduler_with_warmup(
             f"but given {type(lr_scheduler)}"
         )
 
-    if not isinstance(warmup_duration, numbers.Integral):
+    if not isinstance(warmup_duration, int):
         raise TypeError(f"Argument warmup_duration should be integer, but given {warmup_duration}")
 
     if not (warmup_duration > 1):
         raise ValueError(f"Argument warmup_duration should be at least 2 events, but given {warmup_duration}")
 
     warmup_schedulers: List[ParamScheduler] = []
+    milestones_values: List[Tuple[int, float]] = []
 
     for param_group_index, param_group in enumerate(lr_scheduler.optimizer.param_groups):
         if warmup_end_value is None:
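
Note: the validation above now demands a plain built-in `int` for `warmup_duration` instead of any `numbers.Integral` (so, for example, a NumPy integer scalar no longer passes the check). A hedged usage sketch of the public API; the model and optimizer below are illustrative:

    import torch
    from torch.optim.lr_scheduler import ExponentialLR
    from ignite.handlers.param_scheduler import create_lr_scheduler_with_warmup

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    scheduler = create_lr_scheduler_with_warmup(
        ExponentialLR(optimizer, gamma=0.98),
        warmup_start_value=0.0,
        warmup_end_value=0.1,
        warmup_duration=5,  # must now be a plain int >= 2
    )
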
@@ -1176,6 +1177,7 @@ def create_lr_scheduler_with_warmup(
         lr_scheduler,
     ]
     durations = [milestones_values[-1][0] + 1]
+    # pyrefly: ignore [bad-argument-type]
     combined_scheduler = ConcatScheduler(schedulers, durations=durations, save_history=save_history)
 
     if output_simulated_values is not None:
@@ -1185,6 +1187,7 @@ def create_lr_scheduler_with_warmup(
                 f"but given {type(output_simulated_values)}."
             )
         num_events = len(output_simulated_values)
+        # pyrefly: ignore [bad-argument-type]
         result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations)
         for i in range(num_events):
             output_simulated_values[i] = result[i]
@@ -1650,6 +1653,7 @@ class ReduceLROnPlateauScheduler(ParamScheduler):
         self.trainer = trainer
         self.optimizer = optimizer
 
+        min_lr: Union[float, List[float]]
         if "min_lr" in scheduler_kwargs and param_group_index is not None:
             min_lr = scheduler_kwargs["min_lr"]
             if not isinstance(min_lr, float):
@@ -1670,11 +1674,11 @@ class ReduceLROnPlateauScheduler(ParamScheduler):
             _scheduler_kwargs["verbose"] = False
 
         self.scheduler = ReduceLROnPlateau(optimizer, **_scheduler_kwargs)
-        self.scheduler._reduce_lr = self._reduce_lr  # type: ignore[method-assign]
+        self.scheduler._reduce_lr = self._reduce_lr
 
         self._state_attrs += ["metric_name", "scheduler"]
 
-    def __call__(self, engine: Engine, name: Optional[str] = None) -> None:  # type: ignore[override]
+    def __call__(self, engine: Engine, name: Optional[str] = None) -> None:
         if not hasattr(engine.state, "metrics") or self.metric_name not in engine.state.metrics:
             raise ValueError(
                 "Argument engine should have in its 'state', attribute 'metrics' "
@@ -1,7 +1,7 @@
 import numbers
 import warnings
 from bisect import bisect_right
-from typing import Any, List, Sequence, Tuple, Union
+from typing import Any, Callable, List, Sequence, Tuple, Union
 
 from ignite.engine import CallableEventWithFilter, Engine, Events, EventsList
 from ignite.handlers.param_scheduler import BaseParamScheduler
@@ -183,7 +183,13 @@ class LambdaStateScheduler(StateParamScheduler):
 
     """
 
-    def __init__(self, lambda_obj: Any, param_name: str, save_history: bool = False, create_new: bool = False):
+    def __init__(
+        self,
+        lambda_obj: Callable[[int], Union[List[float], float]],
+        param_name: str,
+        save_history: bool = False,
+        create_new: bool = False,
+    ):
         super(LambdaStateScheduler, self).__init__(param_name, save_history, create_new)
 
         if not callable(lambda_obj):
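
Note: `lambda_obj` is now annotated as a callable mapping an event index to a float (or a list of floats) rather than `Any`. A hedged usage sketch; the engine, parameter name and schedule below are illustrative:

    from ignite.engine import Engine, Events
    from ignite.handlers.state_param_scheduler import LambdaStateScheduler

    trainer = Engine(lambda engine, batch: batch)

    scheduler = LambdaStateScheduler(
        lambda_obj=lambda event_index: 0.9 ** event_index,  # satisfies Callable[[int], float]
        param_name="custom_param",
        create_new=True,
    )
    scheduler.attach(trainer, Events.EPOCH_COMPLETED)
    trainer.run([0, 1, 2], max_epochs=3)
    print(trainer.state.custom_param)
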
@@ -251,6 +251,7 @@ class BasicTimeProfiler:
         total_eh_time: Union[int, torch.Tensor] = sum(
             [(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
         )
+        # pyrefly: ignore [no-matching-overload]
         event_handlers_stats = dict(
             [
                 (str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
@@ -334,6 +335,7 @@ class BasicTimeProfiler:
 
         results_df = pd.DataFrame(
             data=results_dump,
+            # pyrefly: ignore [bad-argument-type]
             columns=[
                 "epoch",
                 "iteration",
@@ -498,14 +500,14 @@ class HandlersTimeProfiler:
 
         self.dataflow_times: List[float] = []
         self.processing_times: List[float] = []
-        self.event_handlers_times: Dict[EventEnum, Dict[str, List[float]]] = {}
+        self.event_handlers_times: Dict[Union[str, EventEnum], Dict[str, List[float]]] = {}
 
     @staticmethod
     def _get_callable_name(handler: Callable) -> str:
         # get name of the callable handler
         return getattr(handler, "__qualname__", handler.__class__.__name__)
 
-    def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
+    def _create_wrapped_handler(self, handler: Callable, event: Union[str, EventEnum]) -> Callable:
         @functools.wraps(handler)
         def _timeit_handler(*args: Any, **kwargs: Any) -> None:
             self._event_handlers_timer.reset()
@@ -530,7 +532,7 @@ class HandlersTimeProfiler:
         t = self._dataflow_timer.value()
         self.dataflow_times.append(t)
 
-    def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
+    def _reset(self, event_handlers_names: Mapping[Union[str, EventEnum], List[str]]) -> None:
         # reset the variables used for profiling
         self.dataflow_times = []
         self.processing_times = []
@@ -689,6 +691,7 @@ class HandlersTimeProfiler:
 
         results_dump = torch.stack(cols, dim=1).numpy()
 
+        # pyrefly: ignore [bad-argument-type]
         results_df = pd.DataFrame(data=results_dump, columns=headers)
         results_df.to_csv(output_path, index=False)
 
@@ -223,8 +223,13 @@ class ProgressBar(BaseLogger):
         super(ProgressBar, self).attach(engine, log_handler, event_name)
         engine.add_event_handler(closing_event_name, self._close)
 
-    def attach_opt_params_handler(  # type: ignore[empty-body]
-        self, engine: Engine, event_name: Union[str, Events], *args: Any, **kwargs: Any
+    def attach_opt_params_handler(
+        self,
+        engine: Engine,
+        event_name: Union[str, Events],
+        *args: Any,
+        **kwargs: Any,
+        # pyrefly: ignore [bad-return]
     ) -> RemovableEventHandle:
         """Intentionally empty"""
         pass
@@ -1,7 +1,7 @@
 """Visdom logger and its helper handlers."""
 
 import os
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Callable, Dict, List, Optional, Union, TYPE_CHECKING
 
 import torch
 import torch.nn as nn
@@ -165,7 +165,7 @@ class VisdomLogger(BaseLogger):
                 "pip install git+https://github.com/fossasia/visdom.git"
             )
 
-        if num_workers > 0:
+        if num_workers > 0 or TYPE_CHECKING:
            # If visdom is installed, one of its dependencies `tornado`
            # requires also `futures` to be installed.
            # Let's check anyway if we can import it.
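
Note: `typing.TYPE_CHECKING` is `False` at runtime, so `num_workers > 0 or TYPE_CHECKING` leaves runtime behaviour unchanged while guaranteeing that static type checkers always analyze the guarded import. A minimal standalone sketch of the trick (not the visdom code):

    from typing import TYPE_CHECKING

    def maybe_start_pool(num_workers: int) -> None:
        if num_workers > 0 or TYPE_CHECKING:
            # a type checker always sees this branch; at runtime it only runs when num_workers > 0
            from concurrent.futures import ThreadPoolExecutor

            ThreadPoolExecutor(max_workers=num_workers or 1).shutdown()

    maybe_start_pool(0)  # nothing happens at runtime
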
@@ -1,6 +1,7 @@
 """WandB logger and its helper handlers."""
 
 from typing import Any, Callable, List, Optional, Union
+from warnings import warn
 
 from torch.optim import Optimizer
 
@@ -172,8 +173,7 @@ class OutputHandler(BaseOutputHandler):
             Default is None, global_step based on attached engine. If provided,
             uses function output as global_step. To setup global step from another engine, please use
             :meth:`~ignite.handlers.wandb_logger.global_step_from_engine`.
-        sync: If set to False, process calls to log in a seperate thread. Default (None) uses whatever
-            the default value of wandb.log.
+        sync: Deprecated, has no function. Argument is kept here for compatibility with existing code.
 
     Examples:
         .. code-block:: python
@@ -284,7 +284,8 @@ class OutputHandler(BaseOutputHandler):
         state_attributes: Optional[List[str]] = None,
     ):
         super().__init__(tag, metric_names, output_transform, global_step_transform, state_attributes)
-        self.sync = sync
+        if sync is not None:
+            warn("The sync argument for the WandBLoggers is no longer used, and may be removed in the future")
 
     def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None:
         if not isinstance(logger, WandBLogger):
@@ -298,7 +299,7 @@ class OutputHandler(BaseOutputHandler):
             )
 
         metrics = self._setup_output_metrics_state_attrs(engine, log_text=True, key_tuple=False)
-        logger.log(metrics, step=global_step, sync=self.sync)
+        logger.log(metrics, step=global_step)
 
 
 class OptimizerParamsHandler(BaseOptimizerParamsHandler):
@@ -309,8 +310,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
             as a sequence.
         param_name: parameter name
         tag: common title for all produced plots. For example, "generator"
-        sync: If set to False, process calls to log in a seperate thread. Default (None) uses whatever
-            the default value of wandb.log.
+        sync: Deprecated, has no function. Argument is kept here for compatibility with existing code.
 
     Examples:
         .. code-block:: python
@@ -346,7 +346,8 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
         self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None
     ):
         super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
-        self.sync = sync
+        if sync is not None:
+            warn("The sync argument for the WandBLoggers is no longer used, and may be removed in the future")
 
     def __call__(self, engine: Engine, logger: WandBLogger, event_name: Union[str, Events]) -> None:
         if not isinstance(logger, WandBLogger):
@@ -358,4 +359,4 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
             f"{tag_prefix}{self.param_name}/group_{i}": float(param_group[self.param_name])
             for i, param_group in enumerate(self.optimizer.param_groups)
         }
-        logger.log(params, step=global_step, sync=self.sync)
+        logger.log(params, step=global_step)
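
Note: across both WandB handlers the `sync` argument is now a no-op: it stays in the signature for backward compatibility, passing a non-None value only emits a warning, and `logger.log` is called without it. A hedged standalone sketch of that deprecation pattern (not the ignite classes themselves):

    from typing import Optional
    from warnings import warn

    class _Handler:
        def __init__(self, sync: Optional[bool] = None) -> None:
            if sync is not None:
                warn("The sync argument is no longer used, and may be removed in the future")

    _Handler()            # silent: the default None means "not passed"
    _Handler(sync=False)  # emits a UserWarning
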
@@ -254,6 +254,8 @@ class Accuracy(_BaseClassification):
             y_pred = torch.transpose(y_pred, 1, last_dim - 1).reshape(-1, num_classes)
             y = torch.transpose(y, 1, last_dim - 1).reshape(-1, num_classes)
             correct = torch.all(y == y_pred.type_as(y), dim=-1)
+        else:
+            raise ValueError(f"Unexpected type: {self._type}")
 
         self._num_correct += torch.sum(correct).to(self._device)
         self._num_examples += correct.shape[0]
ignite/metrics/metric.py CHANGED
@@ -369,6 +369,7 @@ class Metric(Serializable, metaclass=ABCMeta):
         if torch.device(device).type == "xla":
             raise ValueError("Cannot create metric on an XLA device. Use device='cpu' instead.")
 
+        # pyrefly: ignore [read-only]
         self._device = torch.device(device)
         self._skip_unrolling = skip_unrolling
 
@@ -1,6 +1,5 @@
 from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union
+from typing import Any, Callable, List, Mapping, NamedTuple, Optional, Sequence, Tuple, Union
 
 import torch
 
@@ -13,11 +12,15 @@ from ignite.metrics.nlp.utils import lcs, ngrams
 __all__ = ["Rouge", "RougeN", "RougeL"]
 
 
-class Score(namedtuple("Score", ["match", "candidate", "reference"])):
+class Score(NamedTuple):
     r"""
     Computes precision and recall for given matches, candidate and reference lengths.
     """
 
+    match: int
+    candidate: int
+    reference: int
+
     def precision(self) -> float:
         """
         Calculates precision.
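
Note: subclassing `typing.NamedTuple` keeps the tuple behaviour of the old `collections.namedtuple` base while declaring the field types explicitly. A hedged sketch mirroring the `Score` fields (the precision formula here is illustrative, not copied from the source):

    from typing import NamedTuple

    class ScoreSketch(NamedTuple):
        match: int
        candidate: int
        reference: int

        def precision(self) -> float:
            # fraction of candidate n-grams that matched (illustrative definition)
            return self.match / self.candidate if self.candidate > 0 else 0.0

    s = ScoreSketch(match=3, candidate=5, reference=4)
    print(s.precision(), tuple(s))  # 0.6 (3, 5, 4)
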
ignite/metrics/roc_auc.py CHANGED
@@ -210,4 +210,5 @@ class RocCurve(EpochMetric):
         tpr = idist.broadcast(tpr, src=0, safe_mode=True)
         thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
 
+        # pyrefly: ignore [bad-return]
         return fpr, tpr, thresholds
ignite/metrics/ssim.py CHANGED
@@ -161,11 +161,15 @@ class SSIM(Metric):
             kernel_y = self._gaussian(kernel_size[1], sigma[1])
             if ndims == 3:
                 kernel_z = self._gaussian(kernel_size[2], sigma[2])
+            else:
+                kernel_z = None
         else:
             kernel_x = self._uniform(kernel_size[0])
             kernel_y = self._uniform(kernel_size[1])
             if ndims == 3:
                 kernel_z = self._uniform(kernel_size[2])
+            else:
+                kernel_z = None
 
         result = (
             torch.einsum("i,j->ij", kernel_x, kernel_y)
@@ -160,6 +160,9 @@ class ObjectDetectionAvgPrecisionRecall(Metric, _BaseAveragePrecision):
         elif self._area_range == "large":
             min_area = 9216
             max_area = 1e10
+        else:
+            min_area = 0
+            max_area = 1e10
         return torch.logical_and(areas >= min_area, areas <= max_area)
 
     def _check_matching_input(
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pytorch-ignite
-Version: 0.6.0.dev20251116
+Version: 0.6.0.dev20260102
 Summary: A lightweight library to help with training neural networks in PyTorch.
 Project-URL: Homepage, https://pytorch-ignite.ai
 Project-URL: Repository, https://github.com/pytorch/ignite
@@ -1,4 +1,4 @@
-ignite/__init__.py,sha256=E7SP2weQV1yFYLQS4NFtM5OxLKSV5mCv6PJCnG9LJCU,194
+ignite/__init__.py,sha256=k-gLH4LNgVJgTR9LUwySL1NYaJgRB9ttxy1Cpv8AVBw,194
 ignite/_utils.py,sha256=XDPpUDJ8ykLXWMV2AYTqGSj8XCfApsyzsQ3Vij_OB4M,182
 ignite/exceptions.py,sha256=5ZWCVLPC9rgoW8t84D-VeEleqz5O7XpAGPpCdU8rKd0,150
 ignite/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -7,84 +7,84 @@ ignite/base/__init__.py,sha256=y2g9egjuVCYRtaj-4ge081y-8cjIXsw_ZgZ6BRguHi0,44
 ignite/base/mixins.py,sha256=Ip1SHCQCsvNUnLJKJwX9L-hqpfcZAlTad87-PaVgCBI,991
 ignite/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ignite/contrib/engines/__init__.py,sha256=BxmXYIYEtEB1niMWITL8pgyKufCIpXR61rSzPQOhA0g,87
-ignite/contrib/engines/common.py,sha256=ChAzJhgqQi_2SLcZF8m9nI9VpefZ3dUZAlJj_PiRwwQ,28433
+ignite/contrib/engines/common.py,sha256=8WyVV6pqVHKnBfcdZoBPbOUXqqwSOTUI2OKUyMqvOks,28483
 ignite/contrib/engines/tbptt.py,sha256=FSmF5SnoZn7mWNZWRZ-ohWUCfucET78GQu3lvVRNItk,4507
 ignite/contrib/handlers/__init__.py,sha256=rZszZnCbzncE2jqsvx9KP1iS3WZ0I-CnrV3Jh3Xl8_o,1073
-ignite/contrib/handlers/base_logger.py,sha256=uv1VRoSyuoZ2c6mxqJT9jaiPbSNtOp78D685Dog7kVQ,1182
-ignite/contrib/handlers/clearml_logger.py,sha256=_vG45njqIH7T2P9X-mxsGWrWLbg7S5HNJuTvcZ_IqkY,1409
-ignite/contrib/handlers/lr_finder.py,sha256=A62cXyRXClTHRXUaaUUK7v74Yryudh6o4Af4m9NTF9A,702
-ignite/contrib/handlers/mlflow_logger.py,sha256=L9Vl0fgBLLvZKuBjr9xbKE323gxUYPBUshTry9XkWbk,940
-ignite/contrib/handlers/neptune_logger.py,sha256=TvKCa2dpfwKNRn0l0C2her6ti8qQ9JrkexGjePgxsLQ,1319
-ignite/contrib/handlers/param_scheduler.py,sha256=UbpdeCYHQ59f3M43iBfiVryiTjFGUf1SpyCXgk-AjDM,1517
-ignite/contrib/handlers/polyaxon_logger.py,sha256=vZBStQamhlDoRITEby8nkgcvsstv-igcO7EJO3QvEp4,962
-ignite/contrib/handlers/tensorboard_logger.py,sha256=l7lK8rumZUz6nqe9M4RRF5HqTUv8341ckaCWYE_4elE,1388
-ignite/contrib/handlers/time_profilers.py,sha256=OKl7gm09O5AiQHf6acEjF1g6zLdi7ePpMR-2FCe1Dlg,846
-ignite/contrib/handlers/tqdm_logger.py,sha256=WtEPBGYkX4AmH4fzjHP0dTsPN6qwzmbgaD1iotvDzMc,703
-ignite/contrib/handlers/visdom_logger.py,sha256=5Cn_UiG5b7ZMZ5E7JVg5WE4pEoztifrZHHEcXx-pmmU,1212
-ignite/contrib/handlers/wandb_logger.py,sha256=dF872Eymw9w7Km72wJkCMn5mEpkfwFeyFWCVenx_5U8,929
+ignite/contrib/handlers/base_logger.py,sha256=gHVTkVvYMRUXI793rNq8564mMyJaL_HCuoCu8xiKxFY,1158
+ignite/contrib/handlers/clearml_logger.py,sha256=4CRD38jrif-8MeKYiEu5RbF5B-PhEkPnUGfielvt5s8,1385
+ignite/contrib/handlers/lr_finder.py,sha256=ef9rdoPriBo8wGBfk4hfyRz6JUaQVXJmSxeNQOnznjE,678
+ignite/contrib/handlers/mlflow_logger.py,sha256=fIfYVqydhQNVXnAzAKhhBKv1t-KWDzvZo-zs291zcy4,916
+ignite/contrib/handlers/neptune_logger.py,sha256=gOkdMGoKZVqEnCAF-EVfHL-wk0OLXxGwcoXya2O_NIE,1295
+ignite/contrib/handlers/param_scheduler.py,sha256=Z651F0MBJrmgFz5TQMh_BlbXhxr-mh6rs4ZMY5naqQ0,1493
+ignite/contrib/handlers/polyaxon_logger.py,sha256=WZDKx86S_AqGiRnaEZmCP4BoB5c6G7Sd1b0MregAFJI,938
+ignite/contrib/handlers/tensorboard_logger.py,sha256=DwjicfzeW9108k2lxmeZ21e4hcPIcddzbqUQpG9g2NU,1364
+ignite/contrib/handlers/time_profilers.py,sha256=n5QKwor0xdrNmjfgH6q3TZFFk3GBVEdnbkGtvSqFnVc,822
+ignite/contrib/handlers/tqdm_logger.py,sha256=bcqx7YwWCx5pjhIkKwcpU4jMwLBVfTrvxp_2rau5GZM,679
+ignite/contrib/handlers/visdom_logger.py,sha256=uTs9kE4-e2YRrZgkZJGHpjmE19WYGC8Mj3Ke1FpUrGU,1188
+ignite/contrib/handlers/wandb_logger.py,sha256=QUAsXlVq7b9NcXoc1-99jfoghYrxC2izAL-ZIknVNRo,905
 ignite/contrib/metrics/__init__.py,sha256=Hh_hYZol4iAZ5nH_WEFIaiuqgw7UqkJrxj0mb-Oaj9A,414
-ignite/contrib/metrics/average_precision.py,sha256=5bY5N6YRX88wV7Q69Q2B84LyFRlIaJ8Vo_J-wB6PGds,759
-ignite/contrib/metrics/cohen_kappa.py,sha256=fakVVFraPHNVlMRlSPtQQDizH18Bwnw4xFe_qNCvAZ8,692
-ignite/contrib/metrics/gpu_info.py,sha256=PffJkgNXo8qD6V9msh9XcZvGKeX2s6BGDeLCH0luRtk,659
-ignite/contrib/metrics/precision_recall_curve.py,sha256=qfNM-W2WbaBMhjOd8Y3ENuwkxvEGGAlp8SIZqfw3jOE,956
-ignite/contrib/metrics/roc_auc.py,sha256=KxQlipus1_nht3N0Q0wW2iWNwV3urQZzupGKHcSCeiI,687
+ignite/contrib/metrics/average_precision.py,sha256=QBMwk8qVxiLXtIKnLPjspXUKvsQE7H0Xxq3CLFciwdA,735
+ignite/contrib/metrics/cohen_kappa.py,sha256=GoxJkBgIB21E5Nl_UqIe5oLqEHrGeQePpPcZot6-rJ8,668
+ignite/contrib/metrics/gpu_info.py,sha256=s7KWhvoLzy7_AT5r4624ivSqHJ7Nc2D0s6XLgv7xsCo,635
+ignite/contrib/metrics/precision_recall_curve.py,sha256=_Y6Ra6JdBP5Qng0Lx7_qy35BeLpRz77SHN-HN-B5S88,932
+ignite/contrib/metrics/roc_auc.py,sha256=tI5yXSWCt6W4JDSnAEcuN5gyV7u6JhOJwB-DnEiXKyw,663
 ignite/contrib/metrics/regression/__init__.py,sha256=pyDDt7-eG_xaii_2Noy3DkHh6E437Aqx8XfgRhGGiew,2581
 ignite/contrib/metrics/regression/_base.py,sha256=z3HI4L5ezfmvHcJziTsDtNNNQqQX2QxdR80CwWmXaTs,2268
-ignite/contrib/metrics/regression/canberra_metric.py,sha256=3tantTRqmW15jQZ6UJTS0iVY-7cBpBUCoozUmfVEWMk,818
-ignite/contrib/metrics/regression/fractional_absolute_error.py,sha256=LIn9X-UhAW7kTPRMby5IvT9Z9Gg2MhfWgqYJiwfoEDg,924
-ignite/contrib/metrics/regression/fractional_bias.py,sha256=_qkvHWZqAaPCdRYqwU3HvzkIUi4qalSq5cIoh1ejbtY,818
-ignite/contrib/metrics/regression/geometric_mean_absolute_error.py,sha256=F4ixbdhQsraztRMVIjQfhQ3VtzbS9IwYEWDVmLD67A8,964
-ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py,sha256=XVLou8nz3qBfmoXH8EZ08lIc_8T1J0zwksZS2crLFtk,1059
-ignite/contrib/metrics/regression/manhattan_distance.py,sha256=dT7U8iaX05tfN95nRaLIFehb8Gy8EOgLgVL_vepzSns,851
-ignite/contrib/metrics/regression/maximum_absolute_error.py,sha256=hYCR-R80BqFa39-diInnGSqAlRU1mqZe4hIK5_X80tA,891
-ignite/contrib/metrics/regression/mean_absolute_relative_error.py,sha256=gD_Y7bq_V7l2uYZ0dAWsRC6G5q9ccRU_kozlTfOT9ho,953
-ignite/contrib/metrics/regression/mean_error.py,sha256=53dmrYXuF2wEB5wh6-XjFJaPVxplhmFvGChPMHZ2XUo,763
-ignite/contrib/metrics/regression/mean_normalized_bias.py,sha256=fzHfqErIvxhzyT5Ig4hWYX1jCFLBTVxK60XTBPoDZAc,869
-ignite/contrib/metrics/regression/median_absolute_error.py,sha256=4GiWvDqhq5NlZju8ZtdfRFrT9UrtAxe-R-M80CCXqak,880
-ignite/contrib/metrics/regression/median_absolute_percentage_error.py,sha256=zODfHYqn8VDwUut3H24nuP-YHQG4iShrHi17g7xTJKY,997
-ignite/contrib/metrics/regression/median_relative_absolute_error.py,sha256=5G2QIoE_LsyQ54lH9dDAjdVuJ7qAKfBYKAKe0ks87yM,975
-ignite/contrib/metrics/regression/r2_score.py,sha256=g4dTNC6idrgnTZSs2o_xxp5_JuP1JVgyZvNtCpjtRVs,741
-ignite/contrib/metrics/regression/wave_hedges_distance.py,sha256=z1eGISFBmo4BjXrGpAndV7Vk7xg7l7jKnwZVGDlLrVI,869
+ignite/contrib/metrics/regression/canberra_metric.py,sha256=J9wHIM19uooMyFT95B01y3L9xH2fQDSct3nwq0HtFlo,796
+ignite/contrib/metrics/regression/fractional_absolute_error.py,sha256=t1jqwbMtN4JUPgz3WzTQhX2kSdli172lgiAiShfqK-M,902
+ignite/contrib/metrics/regression/fractional_bias.py,sha256=qlRPtzDEkZ_rTO0_0F-2hPxsEPvGweQ61wCzJd82psE,796
+ignite/contrib/metrics/regression/geometric_mean_absolute_error.py,sha256=LPmjhPCp_o5cXFLVMYm6eEC0RNINovftuC53JEBtmwo,942
+ignite/contrib/metrics/regression/geometric_mean_relative_absolute_error.py,sha256=n_dc0IAk2qnxeGyteSaEyejTeWMMcB2mDPQLR7Dj6EY,1037
+ignite/contrib/metrics/regression/manhattan_distance.py,sha256=Cuzj8gJABN59jxLg3B13iUXzzC9nGoVX8trNDBs_N14,829
+ignite/contrib/metrics/regression/maximum_absolute_error.py,sha256=J3JFZp5nMdmjtyGkcS1Wo4sIeHzxKhx02RK8Ro0f8Rs,869
+ignite/contrib/metrics/regression/mean_absolute_relative_error.py,sha256=CYr02QTeKubrgsGKuvt1i83Yjxa6CWWjgEa7uHVX8HU,931
+ignite/contrib/metrics/regression/mean_error.py,sha256=rNJuoCDYzTS7sbHG-2gap3nzsq--GYG_brHAV0gac14,741
+ignite/contrib/metrics/regression/mean_normalized_bias.py,sha256=bO4HdG4W-WNjuSMYUU-MrVfHQKHI__3NdiuFMbd4HOs,847
+ignite/contrib/metrics/regression/median_absolute_error.py,sha256=z3xesUZUmjtSXUDQ29r2E6x8BiDNWjec7_KLRUFCv_o,858
+ignite/contrib/metrics/regression/median_absolute_percentage_error.py,sha256=ogc_WJFF9NWjzSogSOaAtmZAadM74jv_eDo_69QoiXo,975
+ignite/contrib/metrics/regression/median_relative_absolute_error.py,sha256=lqzsNL-TIGPv97xlOwV2aNlZJM_vQXgl5pKnvAOAI4Y,953
+ignite/contrib/metrics/regression/r2_score.py,sha256=1Mwo3Ft2PkYL8xq-CcbKqidJP5jeaWe1ba5sVESsTaU,719
+ignite/contrib/metrics/regression/wave_hedges_distance.py,sha256=1uSqAUZX5aBzw0UJNla6bRYhHM3uPdVPuEzNJa4dixk,847
 ignite/distributed/__init__.py,sha256=qC28ok9XHWJawZfQR2MqWf6ctggS4rUY9PiTJjOCNvI,181
-ignite/distributed/auto.py,sha256=iqJ5-kkOqwCjIOociEB4N8gtgKO7J-2thJsYn4pvGnk,15401
-ignite/distributed/launcher.py,sha256=hjdL8pnWNrpMQjw_GrY9CGWyUqvb6g42nfEsT_5cxdo,13492
+ignite/distributed/auto.py,sha256=9nk9ArklntyzTaHx-odUTtKtX7bch-qQf1HQE7Y6YQE,15443
+ignite/distributed/launcher.py,sha256=lEzoLqfVQDDXoPJ0ELUNs7090o1I6cDBFKuq3lTLPs4,13298
 ignite/distributed/utils.py,sha256=D97JwWgL9RKP8rTfDRf1zMmfRUeJizr7XfLZ8LAScOI,24999
 ignite/distributed/comp_models/__init__.py,sha256=S2WHl463U7BvpcUe9-JaGtuCi3G1cMHFW5QFBQ6fv20,1357
 ignite/distributed/comp_models/base.py,sha256=pTIylP1h2g6NWopBEponfXC6UefqS1l2lEdzTUTNXFc,14185
-ignite/distributed/comp_models/horovod.py,sha256=Mn5xG92bzBT1Dd-0IxEaN342kihUwJNkLCqsxP06ijc,9076
-ignite/distributed/comp_models/native.py,sha256=oxJeK-Dc_dtLpG02wz29y5BvjJv_a3vEXPlRVRLWEIU,28098
-ignite/distributed/comp_models/xla.py,sha256=kVa5HrXaWt8TCLObxUMVpSsUnDv3SPOQ0dP2SlqpmLg,6281
+ignite/distributed/comp_models/horovod.py,sha256=pGrcHQcwjuuMWJufBR4RyT5YR6RHT8wtk4-Bz_ir3_w,9353
+ignite/distributed/comp_models/native.py,sha256=k2ADEkHNTRDyWfBE1JP7AvTQTjjPtW8a2pyNLkeV6AQ,28139
+ignite/distributed/comp_models/xla.py,sha256=XhKFeo7kNu4mTe9yyzLoEzxS8cDbTFJKAYY9m_dDHIk,6367
 ignite/engine/__init__.py,sha256=MRFj6yywKhVkov4ccPkrw4dX1O8PfqceiJkngrcFb7A,36094
-ignite/engine/deterministic.py,sha256=lIacEolZPmfPgVWM_T0Eqg-2G08Wpi_hc1fifzFq0p8,11632
-ignite/engine/engine.py,sha256=xeUx7QkjC4p6iLMCTzFDU7Pq7PXoaoFaRf8TcpIuSog,60915
-ignite/engine/events.py,sha256=ydfG3HPMo3HKcycFSG_GrZ199Tuttcjmd85eQaV_5c0,21807
+ignite/engine/deterministic.py,sha256=uXn5VfxN_AgcEzZwBk_zdPWlSdKH2tl8Md1lcx1mvJ4,11643
+ignite/engine/engine.py,sha256=R0cDvh_MxFWOucmVuxrjiH3_xcybNDo9c4BkHUk2CEI,60713
+ignite/engine/events.py,sha256=FrcvnvjNZEzzohMQU6ZxL8ezrUQshUuM917Rsyxf8v0,21833
 ignite/engine/utils.py,sha256=QG5mkdg4OipspqgpNQcJuoHTYdr2Sx5LS16kfjOHDdI,1073
 ignite/handlers/__init__.py,sha256=Qq85YTtHPcii6UAfMOoCPg9RwigH96iqxOJKIlRfDqw,2728
 ignite/handlers/base_logger.py,sha256=wPiGn9iCh5ung1GaRUf_qAlqe63h1NpUUQ0XK709p2k,13011
-ignite/handlers/checkpoint.py,sha256=1k_RhDW5rjkJB4oz2jNWxjpuGgLvbNCt7_-1Pqz9Lxg,46266
-ignite/handlers/clearml_logger.py,sha256=12a9eue6hnFh5CrdSFz_EpGF0-XKRMlBXpR2NWWw8DY,37949
+ignite/handlers/checkpoint.py,sha256=u6cFUDxAoSSBKCBprmDud2LEZGDEYHvyCoLUmtG3Xd4,46309
+ignite/handlers/clearml_logger.py,sha256=0-57RYznIz-EgTsKtkKFPdGGFQXJIhq146H_qiE8hVc,37897
 ignite/handlers/early_stopping.py,sha256=UA6TiKho5CbD085R-16H8w3r0BYPQcWQjhEXg8aITSw,4139
 ignite/handlers/ema_handler.py,sha256=SmUyyWIFPZW3yMvjD_sSk5m_LfnMFl9R-uQdbXNFfY0,11854
 ignite/handlers/fbresearch_logger.py,sha256=MfQeiBIXBYLEwZoDIld2oCceMeTAsz8rc5cd7fLtpJs,11133
-ignite/handlers/lr_finder.py,sha256=AAQLUcRLrfkrVOQufmRNZqAOTw1MpmAAo8YLk3JkdQs,22145
+ignite/handlers/lr_finder.py,sha256=EMcQR3NDPOuh2s85a5Zu5Bqt0I4pg1cACJpjSa5cO4A,22100
 ignite/handlers/mlflow_logger.py,sha256=M5Mggrnr2wMsms8wbEaHqNtTk5L1zNs1MlPWD0ZCpDQ,13894
-ignite/handlers/neptune_logger.py,sha256=SrehLclS8ccyuxO_0HYPvt5SN8EB8g9KWFfqQMQsGAw,27298
-ignite/handlers/param_scheduler.py,sha256=c730LIS6obDNNH2jitc2BRDK6AO36FfD3e1x336Oen4,68261
+ignite/handlers/neptune_logger.py,sha256=Rv-O_i0zGZC2Ozzeetxv7rtD7iP3IeWEcbY-U28Mkzg,27348
+ignite/handlers/param_scheduler.py,sha256=Tn4o27YBrp5JsuadHobIrsHfmvB_cR1IrV_oV1Eo7us,68373
 ignite/handlers/polyaxon_logger.py,sha256=5b7Zxhksne8Ufg_SBTG-rlf_9CPSjkBQOJR4-ynoZnQ,12354
-ignite/handlers/state_param_scheduler.py,sha256=xBOF07_JVexafmC-k4ifL_nN31IF8ThbebGWIxlbLs8,20745
+ignite/handlers/state_param_scheduler.py,sha256=B89YKZyj9DXLXQyr3amDNMslUOWNHZDis2DXIwW0q10,20841
 ignite/handlers/stores.py,sha256=8XM_Qqsitfu0WtOOE-K2FMtv51vD90r3GgQlCzRABYc,2616
 ignite/handlers/tensorboard_logger.py,sha256=q3YxXkbIFayBggI_kcHyl-upttVVjjnqFOLgyjj2cRo,27967
 ignite/handlers/terminate_on_nan.py,sha256=RFSKd3Oqn9Me2xLCos4lSE-hnY7fYWWjE9blioeMlIs,2103
 ignite/handlers/time_limit.py,sha256=heTuS-ReBbOUCm1NcNJGhzxI080Hanc4hOLB2Y4GeZk,1567
-ignite/handlers/time_profilers.py,sha256=GZCoOpiFSc2yVgHQjpS1To8Yjb6G6HwydsiWMjwMQfA,30301
+ignite/handlers/time_profilers.py,sha256=8iCcBYPxv0vKFSO_ujFV0ST54a9PD9ezFLvYTIu9lFI,30482
 ignite/handlers/timing.py,sha256=nHeBHvPwYdPRMAx-jk_8MjZit4a7rmsmIWkUrajAG-s,4705
-ignite/handlers/tqdm_logger.py,sha256=5N70XA9rRm2x6sWYAJB1U5Y_bky2fa3qhec8SVgB3hY,13049
+ignite/handlers/tqdm_logger.py,sha256=3kxH39vM0LCDVwIZl9HQRaWM2Pr6bYC_l9oydFJmdM4,13093
 ignite/handlers/utils.py,sha256=X4LRqo1kqGsbmX0pEuZKYR6K4C8sZudAqxCLriiXtCg,872
-ignite/handlers/visdom_logger.py,sha256=sg75ohEkDT7gYfEbLn5464GO-s0MLUEWxdFw_zSVSYw,21830
-ignite/handlers/wandb_logger.py,sha256=gGvbFNjc6gCfVFfOXcnz3-P4sqqP-P9at1UwUV_mwMg,14701
+ignite/handlers/visdom_logger.py,sha256=RY5ss3NAPad7d3xFFnqczCtuO6RgmWq9ROz-sFf6imI,21862
+ignite/handlers/wandb_logger.py,sha256=vGok3gADQmTNkc6KkfFBreYoHAO8EneuU65xjBpT5-Q,14837
 ignite/metrics/__init__.py,sha256=m-8F8J17r-aEwsO6Ww-8AqDRN59WFfYBwCDKwqGDSmI,3627
 ignite/metrics/accumulation.py,sha256=xWdsm9u6JfsfODX_GUKzQc_omrdFDJ4yELBR-xXgc4s,12448
-ignite/metrics/accuracy.py,sha256=rI1TG-7WdJxcqGCMxGErXBWLmTNP1yczJgjjRyby0No,10168
+ignite/metrics/accuracy.py,sha256=W8mO4W11VzryMXKy8G7W_g4A9PH9RYpejW_tQ-T_Txw,10245
 ignite/metrics/average_precision.py,sha256=AL4fvWCUL6zMNq_u2vQRnAdmdByB8S8x8jSE-MoFVjY,3694
 ignite/metrics/classification_report.py,sha256=zjGlaMnRz2__op6hrZq74OusO0W_5B1AIe8KzYGFilM,5988
 ignite/metrics/cohen_kappa.py,sha256=Qwcd4P2kN12CVCFC-kVdzn_2XV7kGzP6LlWkK209JJ8,3815
@@ -104,7 +104,7 @@ ignite/metrics/mean_absolute_error.py,sha256=gfbzoXNdyj9GCEzSxHXn0359TNNjnKBYshS
 ignite/metrics/mean_average_precision.py,sha256=cXP9pYidQnAazGXBrhC80WoI4eK4lb3avNO5d70TLd4,19136
 ignite/metrics/mean_pairwise_distance.py,sha256=Ys6Rns6s-USS_tyP6Pa3bWZSI7f_hP5-lZM64UGJGjo,4104
 ignite/metrics/mean_squared_error.py,sha256=QdxXMYzxltfciMMRxxK5JhdlKXsdHe370EzwvIbwSmA,3679
-ignite/metrics/metric.py,sha256=3dv3vy-YTgzC5aIZgLCW6pdiPzF5GqgKpqeSreEifA8,35103
+ignite/metrics/metric.py,sha256=T3IiFIGTv_UOScd8ei4H9SraHfTJ09OM8I6hRfzr_sA,35141
 ignite/metrics/metric_group.py,sha256=UE7WrMbpKlO9_DPqxQdlmFAWveWoT1knKwRlHDl9YIU,2544
 ignite/metrics/metrics_lambda.py,sha256=NwKZ1J-KzFFbSw7YUaNJozdfKZLVqrkjQvFKT6ixnkg,7309
 ignite/metrics/multilabel_confusion_matrix.py,sha256=1pjLNPGTDJWAkN_BHdBPekcish6Ra0uRUeEbdj3Dm6Y,7377
@@ -113,10 +113,10 @@ ignite/metrics/precision.py,sha256=xe8_e13cPMaC1Mfw-RTlmkag6pdcHCIbi70ASI1IahY,1
 ignite/metrics/precision_recall_curve.py,sha256=rcmG2W7dDuA_8fyekHNk4ronecewolMprW4rxUB8xsc,6228
 ignite/metrics/psnr.py,sha256=G994inwIczTWC5JfwECr0LSAtgquRGCs0283GylPR8c,5558
 ignite/metrics/recall.py,sha256=MaywS5E8ioaHZvTPGhQaYPQV-xDmptYuv8kDRe_-BEY,9867
-ignite/metrics/roc_auc.py,sha256=NW_8GKX9W2tSLXn_d9G2A69gkbG62HWOc_YdyzBYO2s,9207
+ignite/metrics/roc_auc.py,sha256=U97y_JApK2vU1OmZKUJqolHQOZ1qemCSHdxcsLOO2Jg,9246
 ignite/metrics/root_mean_squared_error.py,sha256=yiOn5AQeg-RL2wM1MAng5Q98FHJc21chXU65tITT0Wo,2903
 ignite/metrics/running_average.py,sha256=vcC_LtsrJxEMea05TmBFzFqCK6nZd8hHavsfIlf2C6c,11333
-ignite/metrics/ssim.py,sha256=_uJJdoHP4E4_sitcvFr9wTcoocK3iTxtSh_pA5J7Ss8,11766
+ignite/metrics/ssim.py,sha256=yU877i4wXcHA7vr5qAU9p0LmehEJdKQTFzd2L4Lwm3Q,11866
 ignite/metrics/top_k_categorical_accuracy.py,sha256=pqsArVTSxnwt49S3lZFVqOkCXbzx-WPxfQnhtQ390RM,4706
 ignite/metrics/clustering/__init__.py,sha256=QljKwToBY-0fHblKbj1GsmP7rE5tlzHkrtw98MYEX44,233
 ignite/metrics/clustering/_base.py,sha256=lpQwtR54oTUrif7vQ7EE3ch8PJ91ECnzLov8z34gf5E,1526
@@ -129,7 +129,7 @@ ignite/metrics/gan/inception_score.py,sha256=78_qrECWb_KsbLbo1lvDnvFJ9FsWPsbUi1a
 ignite/metrics/gan/utils.py,sha256=3nihbBrcM9MRcu6r0p3x5SgZQ5V4aag20ZppM7j_HiM,3993
 ignite/metrics/nlp/__init__.py,sha256=TiDKRhw7lhZeoL2Cn4s306cKIuBbXl2fizN1ZepMhwI,168
 ignite/metrics/nlp/bleu.py,sha256=NyQZ3CQB1xUnH_KWer5QtxkM_S_aiO3ok86UMxHaQ_w,11539
-ignite/metrics/nlp/rouge.py,sha256=pcIBCFBybJczYnPxuoLibwzNXYOMxf_JtyFiJkgo10A,15328
+ignite/metrics/nlp/rouge.py,sha256=siAxJzGE3KjH23u-F3DCUPke--ls-1XMygncGhTYJp4,15313
 ignite/metrics/nlp/utils.py,sha256=CA0MRMk9l97QockFYYhU6k0-hLhP3GwW36ONZ7TRqmc,2341
 ignite/metrics/regression/__init__.py,sha256=I594yB38ypWi9IDi9rrdshdXeBnSRcST09tnLRjN0yk,1472
 ignite/metrics/regression/_base.py,sha256=5V6GkkaBYRuW9J3yDXucyTZp1XJ2uIG7F4w2XcBsd3w,2365
@@ -152,8 +152,8 @@ ignite/metrics/regression/r2_score.py,sha256=mTW5ldE05UtPdBGjo_LQF96fbS5jjQbM9gL
 ignite/metrics/regression/spearman_correlation.py,sha256=IzmN4WIe7C4cTUU3BOkBmaw4gW6LTYJUFVhWeblvDVA,4603
 ignite/metrics/regression/wave_hedges_distance.py,sha256=Ji_NRUgnZ3lJgi5fyNFLRjbHO648z4dBmqVDQU9ImKA,2792
 ignite/metrics/vision/__init__.py,sha256=lPBAEq1idc6Q17poFm1SjttE27irHF1-uNeiwrxnLrU,159
-ignite/metrics/vision/object_detection_average_precision_recall.py,sha256=PwdXVeGAF0sLIxUrvnnE7ZojpFNkZB5O6bYoopqc3M4,25024
-pytorch_ignite-0.6.0.dev20251116.dist-info/METADATA,sha256=3EQDV5v1J0ZJ0NURJ1aFAvc_xRNedW_LJePKo3M7ap8,27979
-pytorch_ignite-0.6.0.dev20251116.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pytorch_ignite-0.6.0.dev20251116.dist-info/licenses/LICENSE,sha256=SwJvaRmy1ql-k9_nL4WnER4_ODTMF9fWoP9HXkoicgw,1527
-pytorch_ignite-0.6.0.dev20251116.dist-info/RECORD,,
+ignite/metrics/vision/object_detection_average_precision_recall.py,sha256=4wwiNVd658ynIpIbQlffTA-ehvyJ2EzmJ5pBSBuA8XQ,25091
+pytorch_ignite-0.6.0.dev20260102.dist-info/METADATA,sha256=Rw6NdCNx5egUSgLTA0RjoYxc_Ut9wX95aIdFa4EdJBk,27979
+pytorch_ignite-0.6.0.dev20260102.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+pytorch_ignite-0.6.0.dev20260102.dist-info/licenses/LICENSE,sha256=SwJvaRmy1ql-k9_nL4WnER4_ODTMF9fWoP9HXkoicgw,1527
+pytorch_ignite-0.6.0.dev20260102.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: hatchling 1.27.0
+Generator: hatchling 1.28.0
 Root-Is-Purelib: true
 Tag: py3-none-any