pytorch-ignite 0.6.0.dev20250510__py3-none-any.whl → 0.6.0.dev20251103__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.


ignite/__init__.py CHANGED
@@ -6,4 +6,4 @@ import ignite.handlers
  import ignite.metrics
  import ignite.utils

- __version__ = "0.6.0.dev20250510"
+ __version__ = "0.6.0.dev20251103"
ignite/distributed/comp_models/native.py CHANGED
@@ -178,7 +178,7 @@ if has_native_dist_support:
  c: Counter = Counter(hostnames)
  sizes = torch.tensor([0] + list(c.values()))
  cumsum_sizes = torch.cumsum(sizes, dim=0)
- node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()
+ node_rank = cast(int, (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item())
  local_rank = rank - cumsum_sizes[node_rank].item()
  return int(local_rank), node_rank

ignite/engine/__init__.py CHANGED
@@ -133,11 +133,11 @@ def supervised_training_step_amp(
  prepare_batch: Callable = _prepare_batch,
  model_transform: Callable[[Any], Any] = lambda output: output,
  output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
- scaler: Optional["torch.cuda.amp.GradScaler"] = None,
+ scaler: Optional["torch.amp.GradScaler"] = None,
  gradient_accumulation_steps: int = 1,
  model_fn: Callable[[torch.nn.Module, Any], Any] = lambda model, x: model(x),
  ) -> Callable:
- """Factory function for supervised training using ``torch.cuda.amp``.
+ """Factory function for supervised training using ``torch.amp``.

  Args:
  model: the model to train.
@@ -170,7 +170,7 @@ def supervised_training_step_amp(
  model = ...
  optimizer = ...
  loss_fn = ...
- scaler = torch.cuda.amp.GradScaler(2**10)
+ scaler = torch.amp.GradScaler('cuda', 2**10)

  update_fn = supervised_training_step_amp(model, optimizer, loss_fn, 'cuda', scaler=scaler)
  trainer = Engine(update_fn)
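For readers migrating their own code: `torch.cuda.amp.GradScaler` is deprecated in favor of the device-generic `torch.amp.GradScaler`, available since torch 2.3 (hence the raised requirement in the next hunk). A minimal sketch of the equivalent calls, assuming a CUDA-capable build:

```python
import torch

# Deprecated spelling (pre-2.3):
#   scaler = torch.cuda.amp.GradScaler(init_scale=2**10)
# Device-generic replacement; the first positional argument is the device,
# the second is init_scale:
scaler = torch.amp.GradScaler("cuda", init_scale=2**10)

# autocast follows the same device-generic pattern:
with torch.amp.autocast("cuda"):
    pass  # forward pass would go here
```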
@@ -393,8 +393,8 @@ def supervised_training_step_tpu(


  def _check_arg(
- on_tpu: bool, on_mps: bool, amp_mode: Optional[str], scaler: Optional[Union[bool, "torch.cuda.amp.GradScaler"]]
- ) -> Tuple[Optional[str], Optional["torch.cuda.amp.GradScaler"]]:
+ on_tpu: bool, on_mps: bool, amp_mode: Optional[str], scaler: Optional[Union[bool, "torch.amp.GradScaler"]]
+ ) -> Tuple[Optional[str], Optional["torch.amp.GradScaler"]]:
  """Checking tpu, mps, amp and GradScaler instance combinations."""
  if on_mps and amp_mode:
  raise ValueError("amp_mode cannot be used with mps device. Consider using amp_mode=None or device='cuda'.")
@@ -410,9 +410,9 @@ def _check_arg(
  raise ValueError(f"scaler argument is {scaler}, but amp_mode is {amp_mode}. Consider using amp_mode='amp'.")
  elif amp_mode == "amp" and isinstance(scaler, bool):
  try:
- from torch.cuda.amp import GradScaler
+ from torch.amp import GradScaler
  except ImportError:
- raise ImportError("Please install torch>=1.6.0 to use scaler argument.")
+ raise ImportError("Please install torch>=2.3.1 to use scaler argument.")
  scaler = GradScaler(enabled=True)

  if on_tpu:
@@ -434,7 +434,7 @@ def create_supervised_trainer(
  output_transform: Callable[[Any, Any, Any, torch.Tensor], Any] = lambda x, y, y_pred, loss: loss.item(),
  deterministic: bool = False,
  amp_mode: Optional[str] = None,
- scaler: Union[bool, "torch.cuda.amp.GradScaler"] = False,
+ scaler: Union[bool, "torch.amp.GradScaler"] = False,
  gradient_accumulation_steps: int = 1,
  model_fn: Callable[[torch.nn.Module, Any], Any] = lambda model, x: model(x),
  ) -> Engine:
@@ -459,7 +459,7 @@ def create_supervised_trainer(
  :class:`~ignite.engine.deterministic.DeterministicEngine`, otherwise :class:`~ignite.engine.engine.Engine`
  (default: False).
  amp_mode: can be ``amp`` or ``apex``, model and optimizer will be casted to float16 using
- `torch.cuda.amp <https://pytorch.org/docs/stable/amp.html>`_ for ``amp`` and
+ `torch.amp <https://pytorch.org/docs/stable/amp.html>`_ for ``amp`` and
  using `apex <https://nvidia.github.io/apex>`_ for ``apex``. (default: None)
  scaler: GradScaler instance for gradient scaling if `torch>=1.6.0`
  and ``amp_mode`` is ``amp``. If ``amp_mode`` is ``apex``, this argument will be ignored.
ignite/engine/engine.py CHANGED
@@ -249,6 +249,17 @@ class Engine(Serializable):
  # we need to update state attributes associated with new custom events
  self.state._update_attrs()

+ def has_registered_events(self, event: Any) -> bool:
+ """Check whether engine has a registered event.
+
+ Args:
+ event: Event to check for registration.
+
+ Returns:
+ bool: True if the event is registered, False otherwise.
+ """
+ return event in self._allowed_events
+
  def _handler_wrapper(self, handler: Callable, event_name: Any, event_filter: Callable) -> Callable:
  # signature of the following wrapper will be inspected during registering to check if engine is necessary
  # we have to build a wrapper with relevant signature : solution is functools.wraps
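A sketch of how the new helper can be used to guard custom-event registration, the same pattern `Checkpoint.__call__` adopts later in this diff; the event enum here is hypothetical:

```python
from ignite.engine import Engine, EventEnum

class CustomEvents(EventEnum):
    # hypothetical event, for illustration only
    MODEL_SAVED = "model_saved"

engine = Engine(lambda engine, batch: None)

# guard so that the custom events are only registered once:
if not engine.has_registered_events(CustomEvents.MODEL_SAVED):
    engine.register_events(*CustomEvents)
```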
@@ -328,7 +339,7 @@ class Engine(Serializable):

  try:
  _check_signature(handler, "handler", self, *(event_args + args), **kwargs)
- self._event_handlers[event_name].append((handler, (self,) + args, kwargs))
+ self._event_handlers[event_name].append((handler, (weakref.ref(self),) + args, kwargs))
  except ValueError:
  _check_signature(handler, "handler", *(event_args + args), **kwargs)
  self._event_handlers[event_name].append((handler, args, kwargs))
@@ -432,7 +443,15 @@
  self.last_event_name = event_name
  for func, args, kwargs in self._event_handlers[event_name]:
  kwargs.update(event_kwargs)
- first, others = ((args[0],), args[1:]) if (args and args[0] == self) else ((), args)
+ if args and isinstance(args[0], weakref.ref):
+ resolved_engine = args[0]()
+ if resolved_engine is None:
+ raise RuntimeError("Engine reference not resolved. Cannot execute event handler.")
+ first, others = ((resolved_engine,), args[1:])
+ else:
+ # metrics do not provide engine when registered
+ first, others = (tuple(), args) # type: ignore[assignment]
+
  func(*first, *(event_args + others), **kwargs)

  def fire_event(self, event_name: Any) -> None:
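Storing `weakref.ref(self)` instead of `self` presumably breaks the reference cycle engine → `_event_handlers` → engine, so an engine can be reclaimed as soon as its last external reference goes away rather than waiting for the cycle collector. A minimal sketch of the pattern in isolation (not ignite code):

```python
import weakref

class Emitter:
    """Toy event emitter that stores a weak reference to itself."""

    def __init__(self):
        self._handlers = []

    def on(self, fn):
        # Store a weakref instead of self to avoid a self-referential cycle.
        self._handlers.append((fn, weakref.ref(self)))

    def fire(self):
        for fn, ref in self._handlers:
            owner = ref()  # resolves to None once the emitter is collected
            if owner is None:
                raise RuntimeError("Emitter was garbage-collected.")
            fn(owner)

emitter = Emitter()
emitter.on(lambda owner: print("fired by", owner))
emitter.fire()
```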
ignite/handlers/__init__.py CHANGED
@@ -6,6 +6,7 @@ from ignite.handlers.checkpoint import Checkpoint, DiskSaver, ModelCheckpoint
  from ignite.handlers.clearml_logger import ClearMLLogger
  from ignite.handlers.early_stopping import EarlyStopping
  from ignite.handlers.ema_handler import EMAHandler
+ from ignite.handlers.fbresearch_logger import FBResearchLogger
  from ignite.handlers.lr_finder import FastaiLRFinder
  from ignite.handlers.mlflow_logger import MLflowLogger
  from ignite.handlers.neptune_logger import NeptuneLogger
@@ -64,6 +65,7 @@ __all__ = [
  "CyclicalScheduler",
  "create_lr_scheduler_with_warmup",
  "FastaiLRFinder",
+ "FBResearchLogger",
  "EMAHandler",
  "BasicTimeProfiler",
  "HandlersTimeProfiler",
ignite/handlers/checkpoint.py CHANGED
@@ -21,10 +21,21 @@ else:

  import ignite.distributed as idist
  from ignite.base import Serializable
- from ignite.engine import Engine, Events
+ from ignite.engine import Engine, Events, EventEnum
  from ignite.utils import _tree_apply2, _tree_map

- __all__ = ["Checkpoint", "DiskSaver", "ModelCheckpoint", "BaseSaveHandler"]
+ __all__ = ["Checkpoint", "DiskSaver", "ModelCheckpoint", "BaseSaveHandler", "CheckpointEvents"]
+
+
+ class CheckpointEvents(EventEnum):
+ """Events fired by :class:`~ignite.handlers.checkpoint.Checkpoint`
+
+ - SAVED_CHECKPOINT : triggered when checkpoint handler has saved objects
+
+ .. versionadded:: 0.5.3
+ """
+
+ SAVED_CHECKPOINT = "saved_checkpoint"


  class BaseSaveHandler(metaclass=ABCMeta):
@@ -264,6 +275,29 @@ class Checkpoint(Serializable):
  to_save, save_handler=DiskSaver('/tmp/models', create_dir=True, **kwargs), n_saved=2
  )

+ Respond to checkpoint events:
+
+ .. code-block:: python
+
+ from ignite.handlers import Checkpoint
+ from ignite.engine import Engine, Events
+
+ checkpoint_handler = Checkpoint(
+ {'model': model, 'optimizer': optimizer},
+ save_dir,
+ n_saved=2
+ )
+
+ @trainer.on(Checkpoint.SAVED_CHECKPOINT)
+ def on_checkpoint_saved(engine):
+ print(f"Checkpoint saved at epoch {engine.state.epoch}")
+
+ trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler)
+
+ Attributes:
+ SAVED_CHECKPOINT: Alias of ``SAVED_CHECKPOINT`` from
+ :class:`~ignite.handlers.checkpoint.CheckpointEvents`.
+
  .. versionchanged:: 0.4.3

  - Checkpoint can save model with same filename.
@@ -274,8 +308,13 @@ class Checkpoint(Serializable):
  - `score_name` can be used to define `score_function` automatically without providing `score_function`.
  - `save_handler` automatically saves to disk if path to directory is provided.
  - `save_on_rank` saves objects on this rank in a distributed configuration.
+
+ .. versionchanged:: 0.5.3
+
+ - Added ``SAVED_CHECKPOINT`` class attribute.
  """

+ SAVED_CHECKPOINT = CheckpointEvents.SAVED_CHECKPOINT
  Item = NamedTuple("Item", [("priority", int), ("filename", str)])
  _state_dict_all_req_keys = ("_saved",)

@@ -400,6 +439,8 @@
  return new > self._saved[0].priority

  def __call__(self, engine: Engine) -> None:
+ if not engine.has_registered_events(CheckpointEvents.SAVED_CHECKPOINT):
+ engine.register_events(*CheckpointEvents)
  global_step = None
  if self.global_step_transform is not None:
  global_step = self.global_step_transform(engine, engine.last_event_name)
@@ -460,11 +501,11 @@
  if self.include_self:
  # Now that we've updated _saved, we can add our own state_dict.
  checkpoint["checkpointer"] = self.state_dict()
-
  try:
  self.save_handler(checkpoint, filename, metadata)
  except TypeError:
  self.save_handler(checkpoint, filename)
+ engine.fire_event(CheckpointEvents.SAVED_CHECKPOINT)

  def _setup_checkpoint(self) -> Dict[str, Any]:
  if self.to_save is not None:
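Note that `CheckpointEvents` is registered on the engine lazily, inside the first `Checkpoint.__call__`. A sketch of registering the events up front so that a handler can be attached before any checkpoint has been saved, assuming the import path introduced in this diff:

```python
from ignite.engine import Engine
from ignite.handlers.checkpoint import CheckpointEvents

trainer = Engine(lambda engine, batch: None)
# register explicitly so the handler below can attach immediately:
trainer.register_events(*CheckpointEvents)

@trainer.on(CheckpointEvents.SAVED_CHECKPOINT)
def notify(engine):
    print(f"checkpoint saved at iteration {engine.state.iteration}")
```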
ignite/handlers/clearml_logger.py CHANGED
@@ -118,6 +118,8 @@ class ClearMLLogger(BaseLogger):

  """

+ _task: Any
+
  def __init__(self, **kwargs: Any):
  try:
  from clearml import Task
@@ -823,6 +825,8 @@ class ClearMLSaver(DiskSaver):

  """

+ _task: Any
+
  def __init__(
  self,
  logger: Optional[ClearMLLogger] = None,
@@ -858,7 +862,7 @@ class ClearMLSaver(DiskSaver):
  except ImportError:
  try:
  # Backwards-compatibility for legacy Trains SDK
- from trains import Task
+ from trains import Task # type: ignore[no-redef]
  except ImportError:
  raise ModuleNotFoundError(
  "This contrib module requires clearml to be installed. "
@@ -933,7 +937,7 @@ class ClearMLSaver(DiskSaver):
  except ImportError:
  try:
  # Backwards-compatibility for legacy Trains SDK
- from trains.binding.frameworks import WeightsFileHandler
+ from trains.binding.frameworks import WeightsFileHandler # type: ignore[no-redef]
  except ImportError:
  raise ModuleNotFoundError(
  "This contrib module requires clearml to be installed. "
@@ -957,8 +961,8 @@ class ClearMLSaver(DiskSaver):
  metadata=metadata,
  )

- pre_cb_id = WeightsFileHandler.add_pre_callback(cb_context.pre_callback)
- post_cb_id = WeightsFileHandler.add_post_callback(cb_context.post_callback)
+ pre_cb_id = WeightsFileHandler.add_pre_callback(cb_context.pre_callback) # type: ignore[arg-type]
+ post_cb_id = WeightsFileHandler.add_post_callback(cb_context.post_callback) # type: ignore[arg-type]

  try:
  super(ClearMLSaver, self).__call__(checkpoint, filename, metadata)
ignite/handlers/fbresearch_logger.py CHANGED
@@ -7,7 +7,7 @@ import torch

  from ignite import utils
  from ignite.engine import Engine, Events
- from ignite.handlers import Timer
+ from ignite.handlers.timing import Timer

  MB = 1024.0 * 1024.0

@@ -154,7 +154,7 @@ class FBResearchLogger:
  if torch.cuda.is_available():
  cuda_max_mem = f"GPU Max Mem: {torch.cuda.max_memory_allocated() / MB:.0f} MB"

- current_iter = engine.state.iteration % (engine.state.epoch_length + 1)
+ current_iter = ((engine.state.iteration - 1) % engine.state.epoch_length) + 1
  iter_avg_time = self.iter_timer.value()

  eta_seconds = iter_avg_time * (engine.state.epoch_length - current_iter)
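The replaced expression is off by one from the second epoch onward (with a global, 1-based iteration counter it maps iteration 4 to 0 instead of 1 when `epoch_length = 3`), which also skews the ETA. A quick check of the two formulas:

```python
epoch_length = 3
for iteration in range(1, 7):  # global 1-based iteration counter
    old = iteration % (epoch_length + 1)          # 1, 2, 3, 0, 1, 2  (wrong wrap)
    new = ((iteration - 1) % epoch_length) + 1    # 1, 2, 3, 1, 2, 3  (correct)
    print(iteration, old, new)
```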
ignite/handlers/lr_finder.py CHANGED
@@ -179,7 +179,7 @@ class FastaiLRFinder:
  lr = self._lr_schedule.get_param()
  self._history["lr"].append(lr)
  if trainer.state.iteration == 1:
- self._best_loss = loss
+ self._best_loss = loss # type: ignore[assignment]
  else:
  if smooth_f > 0:
  loss = smooth_f * loss + (1 - smooth_f) * self._history["loss"][-1]
ignite/handlers/tqdm_logger.py CHANGED
@@ -200,7 +200,7 @@ class ProgressBar(BaseLogger):
  Accepted output value types are numbers, 0d and 1d torch tensors and strings.

  """
- desc = self.tqdm_kwargs.get("desc", None)
+ desc = self.tqdm_kwargs.get("desc", "")

  if event_name not in engine._allowed_events:
  raise ValueError(f"Logging event {event_name.name} is not in allowed events for this engine")
ignite/handlers/visdom_logger.py CHANGED
@@ -1,7 +1,7 @@
  """Visdom logger and its helper handlers."""

  import os
- from typing import Any, Callable, cast, Dict, List, Optional, Union
+ from typing import Any, Callable, Dict, List, Optional, Union

  import torch
  import torch.nn as nn
@@ -179,7 +179,7 @@ class VisdomLogger(BaseLogger):
  )

  if server is None:
- server = cast(str, os.environ.get("VISDOM_SERVER_URL", "localhost"))
+ server = os.environ.get("VISDOM_SERVER_URL", "localhost")

  if port is None:
  port = int(os.environ.get("VISDOM_PORT", 8097))
ignite/handlers/wandb_logger.py CHANGED
@@ -26,7 +26,7 @@ class WandBLogger(BaseLogger):
  Args:
  args: Positional arguments accepted by `wandb.init`.
  kwargs: Keyword arguments accepted by `wandb.init`.
- Please see `wandb.init <https://docs.wandb.ai/ref/python/init>`_ for documentation of possible parameters.
+ Please see `wandb.init <https://docs.wandb.ai/ref/python/sdk/functions/init/>`_ for documentation of possible parameters.

  Examples:
  .. code-block:: python
ignite/metrics/clustering/calinski_harabasz_score.py CHANGED
@@ -86,7 +86,7 @@ class CalinskiHarabaszScore(_ClusteringMetricBase):

  .. testoutput::

- 5.733936
+ 5.733935832977295

  .. versionadded:: 0.5.2
  """
ignite/metrics/clustering/silhouette_score.py CHANGED
@@ -86,7 +86,7 @@ class SilhouetteScore(_ClusteringMetricBase):

  .. testoutput::

- 0.12607366
+ 0.1260736584663391

  .. versionadded:: 0.5.2
  """
ignite/metrics/gan/fid.py CHANGED
@@ -31,13 +31,13 @@ def fid_score(
  except ImportError:
  raise ModuleNotFoundError("fid_score requires scipy to be installed.")

- mu1, mu2 = mu1.cpu(), mu2.cpu()
- sigma1, sigma2 = sigma1.cpu(), sigma2.cpu()
+ mu1, mu2 = mu1.detach().cpu(), mu2.detach().cpu()
+ sigma1, sigma2 = sigma1.detach().cpu(), sigma2.detach().cpu()

  diff = mu1 - mu2

  # Product might be almost singular
- covmean, _ = scipy.linalg.sqrtm(sigma1.mm(sigma2), disp=False)
+ covmean, _ = scipy.linalg.sqrtm(sigma1.mm(sigma2).numpy(), disp=False)
  # Numerical error might give slight imaginary component
  if np.iscomplexobj(covmean):
  if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
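The added `.detach()` calls and the explicit `.numpy()` conversion presumably guard against inputs that still track gradients, which cannot be converted to NumPy implicitly. A minimal sketch of the corrected call on toy covariance matrices:

```python
import numpy as np
import scipy.linalg
import torch

sigma1 = torch.eye(3, requires_grad=True)
sigma2 = 2.0 * torch.eye(3)

# Without detach(), calling .numpy() on a grad-tracking tensor raises RuntimeError.
product = sigma1.detach().mm(sigma2).numpy()
covmean, _ = scipy.linalg.sqrtm(product, disp=False)  # returns (sqrtm, error estimate)
print(np.trace(covmean))  # tr(sqrt(sigma1 @ sigma2)) = 3 * sqrt(2) here
```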
ignite/metrics/js_divergence.py CHANGED
@@ -73,7 +73,7 @@ class JSDivergence(KLDivergence):

  .. testoutput::

- 0.16266516844431558
+ 0.1626...

  .. versionchanged:: 0.5.1
  ``skip_unrolling`` argument is added.
ignite/metrics/maximum_mean_discrepancy.py CHANGED
@@ -78,7 +78,7 @@ class MaximumMeanDiscrepancy(Metric):

  .. testoutput::

- 1.072697639465332
+ 1.0726...

  .. versionchanged:: 0.5.1
  ``skip_unrolling`` argument is added.
ignite/metrics/nlp/bleu.py CHANGED
@@ -72,11 +72,11 @@ class Bleu(Metric):

  More details can be found in `Papineni et al. 2002`__.

- __ https://www.aclweb.org/anthology/P02-1040
+ __ https://aclanthology.org/P02-1040/

  In addition, a review of smoothing techniques can be found in `Chen et al. 2014`__

- __ https://aclanthology.org/W14-3346.pdf
+ __ https://aclanthology.org/W14-3346/

  - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
  - `y_pred` (list(list(str))) - a list of hypotheses sentences.
ignite/metrics/nlp/rouge.py CHANGED
@@ -191,7 +191,7 @@ class RougeN(_BaseRouge):

  More details can be found in `Lin 2004`__.

- __ https://www.aclweb.org/anthology/W04-1013.pdf
+ __ https://aclanthology.org/W04-1013

  - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
  - `y_pred` (list(list(str))) must be a sequence of tokens.
@@ -265,7 +265,7 @@ class RougeL(_BaseRouge):

  More details can be found in `Lin 2004`__.

- __ https://www.aclweb.org/anthology/W04-1013.pdf
+ __ https://aclanthology.org/W04-1013

  - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
  - `y_pred` (list(list(str))) must be a sequence of tokens.
@@ -331,7 +331,7 @@ class Rouge(Metric):

  More details can be found in `Lin 2004`__.

- __ https://www.aclweb.org/anthology/W04-1013.pdf
+ __ https://aclanthology.org/W04-1013

  - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
  - `y_pred` (list(list(str))) must be a sequence of tokens.
ignite/metrics/nlp/utils.py CHANGED
@@ -63,7 +63,7 @@ def modified_precision(references: Sequence[Sequence[Any]], candidate: Any, n: i

  More details can be found in `Papineni et al. 2002`__.

- __ https://www.aclweb.org/anthology/P02-1040.pdf
+ __ https://aclanthology.org/P02-1040

  Args:
  references: list of references R
ignite/metrics/precision_recall_curve.py CHANGED
@@ -97,7 +97,7 @@ class PrecisionRecallCurve(EpochMetric):
  if len(self._predictions) < 1 or len(self._targets) < 1:
  raise NotComputableError("PrecisionRecallCurve must have at least one example before it can be computed.")

- if self._result is None: # type: ignore
+ if self._result is None:
  _prediction_tensor = torch.cat(self._predictions, dim=0)
  _target_tensor = torch.cat(self._targets, dim=0)

@@ -126,4 +126,4 @@

  self._result = (precision, recall, thresholds) # type: ignore[assignment]

- return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result) # type: ignore
+ return cast(Tuple[torch.Tensor, torch.Tensor, torch.Tensor], self._result)
pytorch_ignite-0.6.0.dev20250510.dist-info/METADATA → pytorch_ignite-0.6.0.dev20251103.dist-info/METADATA RENAMED
@@ -1,24 +1,17 @@
  Metadata-Version: 2.4
  Name: pytorch-ignite
- Version: 0.6.0.dev20250510
+ Version: 0.6.0.dev20251103
  Summary: A lightweight library to help with training neural networks in PyTorch.
- Home-page: https://github.com/pytorch/ignite
- Author: PyTorch-Ignite Team
- Author-email: contact@pytorch-ignite.ai
- License: BSD
- Description-Content-Type: text/markdown
+ Project-URL: Homepage, https://pytorch-ignite.ai
+ Project-URL: Repository, https://github.com/pytorch/ignite
+ Author-email: PyTorch-Ignite Team <contact@pytorch-ignite.ai>
+ License-Expression: BSD-3-Clause
  License-File: LICENSE
- Requires-Dist: torch<3,>=1.3
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: <=3.13,>=3.9
  Requires-Dist: packaging
- Dynamic: author
- Dynamic: author-email
- Dynamic: description
- Dynamic: description-content-type
- Dynamic: home-page
- Dynamic: license
- Dynamic: license-file
- Dynamic: requires-dist
- Dynamic: summary
+ Requires-Dist: torch<3,>=1.10
+ Description-Content-Type: text/markdown

  <div align="center">

@@ -419,7 +412,7 @@ Few pointers to get you started:
  - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pytorch/ignite/blob/master/examples/notebooks/FastaiLRFinder_MNIST.ipynb) [Basic example of LR finder on
  MNIST](https://github.com/pytorch/ignite/blob/master/examples/notebooks/FastaiLRFinder_MNIST.ipynb)
  - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pytorch/ignite/blob/master/examples/notebooks/Cifar100_bench_amp.ipynb) [Benchmark mixed precision training on Cifar100:
- torch.cuda.amp vs nvidia/apex](https://github.com/pytorch/ignite/blob/master/examples/notebooks/Cifar100_bench_amp.ipynb)
+ torch.amp vs nvidia/apex](https://github.com/pytorch/ignite/blob/master/examples/notebooks/Cifar100_bench_amp.ipynb)
  - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pytorch/ignite/blob/master/examples/notebooks/MNIST_on_TPU.ipynb) [MNIST training on a single
  TPU](https://github.com/pytorch/ignite/blob/master/examples/notebooks/MNIST_on_TPU.ipynb)
  - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1E9zJrptnLJ_PKhmaP5Vhb6DTVRvyrKHx) [CIFAR10 Training on multiple TPUs](https://github.com/pytorch/ignite/tree/master/examples/cifar10)
pytorch_ignite-0.6.0.dev20250510.dist-info/RECORD → pytorch_ignite-0.6.0.dev20251103.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
- ignite/__init__.py,sha256=yPZp6Ap3U-jzysePkO3QtTM4Qj5VQYyuIpmglD3EcTI,194
+ ignite/__init__.py,sha256=tyL6Ulkx95oYbGTYtecqCVA424B4eABtJNaYIY5ujds,194
  ignite/_utils.py,sha256=XDPpUDJ8ykLXWMV2AYTqGSj8XCfApsyzsQ3Vij_OB4M,182
  ignite/exceptions.py,sha256=5ZWCVLPC9rgoW8t84D-VeEleqz5O7XpAGPpCdU8rKd0,150
  ignite/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -52,21 +52,21 @@ ignite/distributed/utils.py,sha256=D97JwWgL9RKP8rTfDRf1zMmfRUeJizr7XfLZ8LAScOI,2
  ignite/distributed/comp_models/__init__.py,sha256=S2WHl463U7BvpcUe9-JaGtuCi3G1cMHFW5QFBQ6fv20,1357
  ignite/distributed/comp_models/base.py,sha256=pTIylP1h2g6NWopBEponfXC6UefqS1l2lEdzTUTNXFc,14185
  ignite/distributed/comp_models/horovod.py,sha256=Mn5xG92bzBT1Dd-0IxEaN342kihUwJNkLCqsxP06ijc,9076
- ignite/distributed/comp_models/native.py,sha256=2kiHxJjjoS1GHs8GRuHcQQqnoz-yBhxGwks9Hv6SFI8,28087
+ ignite/distributed/comp_models/native.py,sha256=oxJeK-Dc_dtLpG02wz29y5BvjJv_a3vEXPlRVRLWEIU,28098
  ignite/distributed/comp_models/xla.py,sha256=kVa5HrXaWt8TCLObxUMVpSsUnDv3SPOQ0dP2SlqpmLg,6281
- ignite/engine/__init__.py,sha256=ASvoTDzjl0ix7ZqHq_N3cDCNUy33gD_UUfs8-u9zni4,36126
+ ignite/engine/__init__.py,sha256=MRFj6yywKhVkov4ccPkrw4dX1O8PfqceiJkngrcFb7A,36094
  ignite/engine/deterministic.py,sha256=lIacEolZPmfPgVWM_T0Eqg-2G08Wpi_hc1fifzFq0p8,11632
- ignite/engine/engine.py,sha256=1EMp-SC5NZt8bl23rPKk7UeuQQqnFPVej3id7LoINko,60204
+ ignite/engine/engine.py,sha256=xeUx7QkjC4p6iLMCTzFDU7Pq7PXoaoFaRf8TcpIuSog,60915
  ignite/engine/events.py,sha256=ydfG3HPMo3HKcycFSG_GrZ199Tuttcjmd85eQaV_5c0,21807
  ignite/engine/utils.py,sha256=QG5mkdg4OipspqgpNQcJuoHTYdr2Sx5LS16kfjOHDdI,1073
- ignite/handlers/__init__.py,sha256=t2UbfFcFNjR_zh3_WHoa0N3n-n_oD1jibbJw0JC3LsE,2641
+ ignite/handlers/__init__.py,sha256=Qq85YTtHPcii6UAfMOoCPg9RwigH96iqxOJKIlRfDqw,2728
  ignite/handlers/base_logger.py,sha256=wPiGn9iCh5ung1GaRUf_qAlqe63h1NpUUQ0XK709p2k,13011
- ignite/handlers/checkpoint.py,sha256=xg0bEShzdAg7SKx8CLAPiWAy7pHi2Jm-EAQKb0NJpyo,44882
- ignite/handlers/clearml_logger.py,sha256=CSFJDhuQ6XaW8DnFA4BjHcqljfS3vofoId-4_7ImmLs,37813
+ ignite/handlers/checkpoint.py,sha256=1k_RhDW5rjkJB4oz2jNWxjpuGgLvbNCt7_-1Pqz9Lxg,46266
+ ignite/handlers/clearml_logger.py,sha256=12a9eue6hnFh5CrdSFz_EpGF0-XKRMlBXpR2NWWw8DY,37949
  ignite/handlers/early_stopping.py,sha256=UA6TiKho5CbD085R-16H8w3r0BYPQcWQjhEXg8aITSw,4139
  ignite/handlers/ema_handler.py,sha256=SmUyyWIFPZW3yMvjD_sSk5m_LfnMFl9R-uQdbXNFfY0,11854
- ignite/handlers/fbresearch_logger.py,sha256=onsUIHv6lYWcGf3VNeTWDCL_s1igK_PXmLGaTksjyk4,11120
- ignite/handlers/lr_finder.py,sha256=LdyBDQEg193mgTWJZHtZ8jP-L3giJSqSE3ffyo-wDoo,22117
+ ignite/handlers/fbresearch_logger.py,sha256=MfQeiBIXBYLEwZoDIld2oCceMeTAsz8rc5cd7fLtpJs,11133
+ ignite/handlers/lr_finder.py,sha256=AAQLUcRLrfkrVOQufmRNZqAOTw1MpmAAo8YLk3JkdQs,22145
  ignite/handlers/mlflow_logger.py,sha256=M5Mggrnr2wMsms8wbEaHqNtTk5L1zNs1MlPWD0ZCpDQ,13894
  ignite/handlers/neptune_logger.py,sha256=SrehLclS8ccyuxO_0HYPvt5SN8EB8g9KWFfqQMQsGAw,27298
  ignite/handlers/param_scheduler.py,sha256=c730LIS6obDNNH2jitc2BRDK6AO36FfD3e1x336Oen4,68261
@@ -78,10 +78,10 @@ ignite/handlers/terminate_on_nan.py,sha256=RFSKd3Oqn9Me2xLCos4lSE-hnY7fYWWjE9bli
  ignite/handlers/time_limit.py,sha256=heTuS-ReBbOUCm1NcNJGhzxI080Hanc4hOLB2Y4GeZk,1567
  ignite/handlers/time_profilers.py,sha256=GZCoOpiFSc2yVgHQjpS1To8Yjb6G6HwydsiWMjwMQfA,30301
  ignite/handlers/timing.py,sha256=nHeBHvPwYdPRMAx-jk_8MjZit4a7rmsmIWkUrajAG-s,4705
- ignite/handlers/tqdm_logger.py,sha256=NWAjrvnMt9k--lJ250W5LQ-AEzmMV_yx7StTpZJjEDM,13051
+ ignite/handlers/tqdm_logger.py,sha256=5N70XA9rRm2x6sWYAJB1U5Y_bky2fa3qhec8SVgB3hY,13049
  ignite/handlers/utils.py,sha256=X4LRqo1kqGsbmX0pEuZKYR6K4C8sZudAqxCLriiXtCg,872
- ignite/handlers/visdom_logger.py,sha256=XWDJHIqiYzV1ywapiltGvF0gj6sZ8Ixxdozx4UtvRGg,21847
- ignite/handlers/wandb_logger.py,sha256=fqTjvjOaxifmHVbKgbqejL_I29frslEakb6YY3-98dM,14686
+ ignite/handlers/visdom_logger.py,sha256=sg75ohEkDT7gYfEbLn5464GO-s0MLUEWxdFw_zSVSYw,21830
+ ignite/handlers/wandb_logger.py,sha256=gGvbFNjc6gCfVFfOXcnz3-P4sqqP-P9at1UwUV_mwMg,14701
  ignite/metrics/__init__.py,sha256=m-8F8J17r-aEwsO6Ww-8AqDRN59WFfYBwCDKwqGDSmI,3627
  ignite/metrics/accumulation.py,sha256=xWdsm9u6JfsfODX_GUKzQc_omrdFDJ4yELBR-xXgc4s,12448
  ignite/metrics/accuracy.py,sha256=rI1TG-7WdJxcqGCMxGErXBWLmTNP1yczJgjjRyby0No,10168
@@ -96,10 +96,10 @@ ignite/metrics/fbeta.py,sha256=2oDsRM7XXJ8LPVrn7iwLdRy75RLJELijmshtMQO3mJM,6870
  ignite/metrics/frequency.py,sha256=NW01rPgWnW1chVOSNAPCcPBu2CvjyXkoyFDAmjOK9A4,4037
  ignite/metrics/gpu_info.py,sha256=kcDIifr9js_P-32LddizEggvvL6eqFLYCHYeFDR4GL0,4301
  ignite/metrics/hsic.py,sha256=am-gor2mXY3H3u2vVNQGPJtkx_5W5JNZeukl2uYqajE,7099
- ignite/metrics/js_divergence.py,sha256=cng6KpR7I7znEEUcr434kJQ2uswgo5wbT5pAAcyVqdI,4838
+ ignite/metrics/js_divergence.py,sha256=HAgj12JwL9bT33cCSAX7g4EKSfqFNNehkgwZfJuncfw,4828
  ignite/metrics/kl_divergence.py,sha256=FdC5BT-nd8nmYqT95Xozw-hW0hZC6dtTklkpJdwWJ6o,5152
  ignite/metrics/loss.py,sha256=mB-zYptymtcyIys0OlbVgUOAqL2WHT2dCPMFda-Klpo,4818
- ignite/metrics/maximum_mean_discrepancy.py,sha256=FVBt-IJNrZo_zV0vwpsjaShDhl4Z5nJWVLRjTV-GpoE,6452
+ ignite/metrics/maximum_mean_discrepancy.py,sha256=AcrlYW6seQn3ZQKcnPIrLzYK2Ho0riGjuRsJmTNtCms,6444
  ignite/metrics/mean_absolute_error.py,sha256=gfbzoXNdyj9GCEzSxHXn0359TNNjnKBYshSnCBQk7i4,3695
  ignite/metrics/mean_average_precision.py,sha256=cXP9pYidQnAazGXBrhC80WoI4eK4lb3avNO5d70TLd4,19136
  ignite/metrics/mean_pairwise_distance.py,sha256=Ys6Rns6s-USS_tyP6Pa3bWZSI7f_hP5-lZM64UGJGjo,4104
@@ -110,7 +110,7 @@ ignite/metrics/metrics_lambda.py,sha256=NwKZ1J-KzFFbSw7YUaNJozdfKZLVqrkjQvFKT6ix
  ignite/metrics/multilabel_confusion_matrix.py,sha256=1pjLNPGTDJWAkN_BHdBPekcish6Ra0uRUeEbdj3Dm6Y,7377
  ignite/metrics/mutual_information.py,sha256=lu1ucVfkx01tGQfELyXzS9woCPOMVImFHfrbIXCvPe8,4692
  ignite/metrics/precision.py,sha256=xe8_e13cPMaC1Mfw-RTlmkag6pdcHCIbi70ASI1IahY,18622
- ignite/metrics/precision_recall_curve.py,sha256=PwhYP3BiGvop8KkjS1aknDTue27whB5vdkvhpcAxm2U,6260
+ ignite/metrics/precision_recall_curve.py,sha256=rcmG2W7dDuA_8fyekHNk4ronecewolMprW4rxUB8xsc,6228
  ignite/metrics/psnr.py,sha256=G994inwIczTWC5JfwECr0LSAtgquRGCs0283GylPR8c,5558
  ignite/metrics/recall.py,sha256=MaywS5E8ioaHZvTPGhQaYPQV-xDmptYuv8kDRe_-BEY,9867
  ignite/metrics/roc_auc.py,sha256=NW_8GKX9W2tSLXn_d9G2A69gkbG62HWOc_YdyzBYO2s,9207
@@ -120,17 +120,17 @@ ignite/metrics/ssim.py,sha256=_uJJdoHP4E4_sitcvFr9wTcoocK3iTxtSh_pA5J7Ss8,11766
  ignite/metrics/top_k_categorical_accuracy.py,sha256=pqsArVTSxnwt49S3lZFVqOkCXbzx-WPxfQnhtQ390RM,4706
  ignite/metrics/clustering/__init__.py,sha256=QljKwToBY-0fHblKbj1GsmP7rE5tlzHkrtw98MYEX44,233
  ignite/metrics/clustering/_base.py,sha256=lpQwtR54oTUrif7vQ7EE3ch8PJ91ECnzLov8z34gf5E,1526
- ignite/metrics/clustering/calinski_harabasz_score.py,sha256=i9DbAuFOFIgi7UVnHiiD_YHKnGgdItOyWqM-XrqLgwk,4654
+ ignite/metrics/clustering/calinski_harabasz_score.py,sha256=jePNE7u72jh8RYL8Sew9rDn3BX6ydYq5Z2FPst4pqB0,4663
  ignite/metrics/clustering/davies_bouldin_score.py,sha256=VGC0jA3_gh9s4v3bm7Cw-5IV1ZUbqssYmU3s-rmnl_8,4646
- ignite/metrics/clustering/silhouette_score.py,sha256=MewWftWKR17OmkeBHLbzG_3RJs7XvSnfjal2D_3U62c,5151
+ ignite/metrics/clustering/silhouette_score.py,sha256=Q9mMcyoR9woHwjxwrAPecFPhKA9bkptoKhhe5-mBfLA,5159
  ignite/metrics/gan/__init__.py,sha256=mBZQNI5uBd72iMyJs6GpbSBLEMm1-Lu1KtgmDAoH_4I,149
- ignite/metrics/gan/fid.py,sha256=QrpTNLLqw1mHPUU5_DfWpIapWH4AjlTXzFdF1IdT8So,10014
+ ignite/metrics/gan/fid.py,sha256=rqITDukGd7CgQAMY8GRVPSLVrkF3MjjFR8bxE6M1kpg,10058
  ignite/metrics/gan/inception_score.py,sha256=78_qrECWb_KsbLbo1lvDnvFJ9FsWPsbUi1aKWyvp8kg,5601
  ignite/metrics/gan/utils.py,sha256=3nihbBrcM9MRcu6r0p3x5SgZQ5V4aag20ZppM7j_HiM,3993
  ignite/metrics/nlp/__init__.py,sha256=TiDKRhw7lhZeoL2Cn4s306cKIuBbXl2fizN1ZepMhwI,168
- ignite/metrics/nlp/bleu.py,sha256=kUC-T02eVvCVW-ujnzgb7uKYG6k_LPH7MPwTNhYCy5w,11549
- ignite/metrics/nlp/rouge.py,sha256=ybdmmne0Td3oWR5KX0jNSTTShsse5p_TyAFR6DerWOc,15364
- ignite/metrics/nlp/utils.py,sha256=o6zWzT8lugNAQVxJq-SEDFI35ve5-P-1TwyVu9wZCpM,2353
+ ignite/metrics/nlp/bleu.py,sha256=NyQZ3CQB1xUnH_KWer5QtxkM_S_aiO3ok86UMxHaQ_w,11539
+ ignite/metrics/nlp/rouge.py,sha256=pcIBCFBybJczYnPxuoLibwzNXYOMxf_JtyFiJkgo10A,15328
+ ignite/metrics/nlp/utils.py,sha256=CA0MRMk9l97QockFYYhU6k0-hLhP3GwW36ONZ7TRqmc,2341
  ignite/metrics/regression/__init__.py,sha256=I594yB38ypWi9IDi9rrdshdXeBnSRcST09tnLRjN0yk,1472
  ignite/metrics/regression/_base.py,sha256=5V6GkkaBYRuW9J3yDXucyTZp1XJ2uIG7F4w2XcBsd3w,2365
  ignite/metrics/regression/canberra_metric.py,sha256=HqQe-0lfwMMO5e_8hBIaAPS6PyKrIEtBKfRBNJV941Q,3077
@@ -153,8 +153,7 @@ ignite/metrics/regression/spearman_correlation.py,sha256=IzmN4WIe7C4cTUU3BOkBmaw
  ignite/metrics/regression/wave_hedges_distance.py,sha256=Ji_NRUgnZ3lJgi5fyNFLRjbHO648z4dBmqVDQU9ImKA,2792
  ignite/metrics/vision/__init__.py,sha256=lPBAEq1idc6Q17poFm1SjttE27irHF1-uNeiwrxnLrU,159
  ignite/metrics/vision/object_detection_average_precision_recall.py,sha256=PwdXVeGAF0sLIxUrvnnE7ZojpFNkZB5O6bYoopqc3M4,25024
- pytorch_ignite-0.6.0.dev20250510.dist-info/licenses/LICENSE,sha256=SwJvaRmy1ql-k9_nL4WnER4_ODTMF9fWoP9HXkoicgw,1527
- pytorch_ignite-0.6.0.dev20250510.dist-info/METADATA,sha256=S3UaVwJVvYli1Ldw3RZ0hx1XsU59V3LmFsc8v1eGkvw,28019
- pytorch_ignite-0.6.0.dev20250510.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
- pytorch_ignite-0.6.0.dev20250510.dist-info/top_level.txt,sha256=P2CnXR6kxvOX7ZMdd-9kVUTwLNz98t0sdjKeyvFBkR4,7
- pytorch_ignite-0.6.0.dev20250510.dist-info/RECORD,,
+ pytorch_ignite-0.6.0.dev20251103.dist-info/METADATA,sha256=L4NDgHM4p5mtFYV7Fbr_OBleFmh2cfvlESJWn0QAyNo,27979
+ pytorch_ignite-0.6.0.dev20251103.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ pytorch_ignite-0.6.0.dev20251103.dist-info/licenses/LICENSE,sha256=SwJvaRmy1ql-k9_nL4WnER4_ODTMF9fWoP9HXkoicgw,1527
+ pytorch_ignite-0.6.0.dev20251103.dist-info/RECORD,,
pytorch_ignite-0.6.0.dev20250510.dist-info/WHEEL → pytorch_ignite-0.6.0.dev20251103.dist-info/WHEEL RENAMED
@@ -1,5 +1,4 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.1)
+ Generator: hatchling 1.27.0
  Root-Is-Purelib: true
  Tag: py3-none-any
-